blob: a174e616eda5a9872e4a76be9426191e48ff7b9b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
Michael Chan49cabf42005-06-06 15:15:17 -070010 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
39
40#include <net/checksum.h>
41
42#include <asm/system.h>
43#include <asm/io.h>
44#include <asm/byteorder.h>
45#include <asm/uaccess.h>
46
47#ifdef CONFIG_SPARC64
48#include <asm/idprom.h>
49#include <asm/oplib.h>
50#include <asm/pbm.h>
51#endif
52
53#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54#define TG3_VLAN_TAG_USED 1
55#else
56#define TG3_VLAN_TAG_USED 0
57#endif
58
59#ifdef NETIF_F_TSO
60#define TG3_TSO_SUPPORT 1
61#else
62#define TG3_TSO_SUPPORT 0
63#endif
64
65#include "tg3.h"
66
67#define DRV_MODULE_NAME "tg3"
68#define PFX DRV_MODULE_NAME ": "
David S. Miller75c80c32005-09-01 17:42:23 -070069#define DRV_MODULE_VERSION "3.38"
70#define DRV_MODULE_RELDATE "September 1, 2005"
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
72#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0
74#define TG3_DEF_TX_MODE 0
75#define TG3_DEF_MSG_ENABLE \
76 (NETIF_MSG_DRV | \
77 NETIF_MSG_PROBE | \
78 NETIF_MSG_LINK | \
79 NETIF_MSG_TIMER | \
80 NETIF_MSG_IFDOWN | \
81 NETIF_MSG_IFUP | \
82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR)
84
85/* length of time before we decide the hardware is borked,
86 * and dev->tx_timeout() should be called to fix the problem
87 */
88#define TG3_TX_TIMEOUT (5 * HZ)
89
90/* hardware minimum and maximum for a single frame's data payload */
91#define TG3_MIN_MTU 60
92#define TG3_MAX_MTU(tp) \
Michael Chan0f893dc2005-07-25 12:30:38 -070093 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
Linus Torvalds1da177e2005-04-16 15:20:36 -070094
95/* These numbers seem to be hard coded in the NIC firmware somehow.
96 * You can't change the ring sizes, but you can change where you place
97 * them in the NIC onboard memory.
98 */
99#define TG3_RX_RING_SIZE 512
100#define TG3_DEF_RX_RING_PENDING 200
101#define TG3_RX_JUMBO_RING_SIZE 256
102#define TG3_DEF_RX_JUMBO_RING_PENDING 100
103
104/* Do not place this n-ring entries value into the tp struct itself,
105 * we really want to expose these constants to GCC so that modulo et
106 * al. operations are done with shifts and masks instead of with
107 * hw multiply/modulo instructions. Another solution would be to
108 * replace things like '% foo' with '& (foo - 1)'.
109 */
110#define TG3_RX_RCB_RING_SIZE(tp) \
111 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
112
113#define TG3_TX_RING_SIZE 512
114#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
115
116#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_RING_SIZE)
118#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_JUMBO_RING_SIZE)
120#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_RCB_RING_SIZE(tp))
122#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
123 TG3_TX_RING_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700124#define TX_BUFFS_AVAIL(TP) \
Michael Chan51b91462005-09-01 17:41:28 -0700125 ((TP)->tx_pending - \
126 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
129#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
130#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
131
132/* minimum number of free TX descriptors required to wake up TX process */
133#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
134
135/* number of ETHTOOL_GSTATS u64's */
136#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
Michael Chan4cafd3f2005-05-29 14:56:34 -0700138#define TG3_NUM_TEST 6
139
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140static char version[] __devinitdata =
141 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
142
143MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
144MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
145MODULE_LICENSE("GPL");
146MODULE_VERSION(DRV_MODULE_VERSION);
147
148static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
149module_param(tg3_debug, int, 0);
150MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
152static struct pci_device_id tg3_pci_tbl[] = {
153 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
164 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
166 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
168 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
170 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
174 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
176 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
178 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
182 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
184 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
186 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
188 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
190 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
John W. Linville6e9017a2005-04-21 16:58:56 -0700211 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
John W. Linvilleaf2bcd92005-04-21 16:57:50 -0700212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
Xose Vazquez Perezd8659252005-05-23 12:54:51 -0700213 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
Michael Chan4cf78e42005-07-25 12:29:19 -0700221 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
226 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
228 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
230 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
232 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
234 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
236 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
240 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241 { 0, }
242};
243
244MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
245
246static struct {
247 const char string[ETH_GSTRING_LEN];
248} ethtool_stats_keys[TG3_NUM_STATS] = {
249 { "rx_octets" },
250 { "rx_fragments" },
251 { "rx_ucast_packets" },
252 { "rx_mcast_packets" },
253 { "rx_bcast_packets" },
254 { "rx_fcs_errors" },
255 { "rx_align_errors" },
256 { "rx_xon_pause_rcvd" },
257 { "rx_xoff_pause_rcvd" },
258 { "rx_mac_ctrl_rcvd" },
259 { "rx_xoff_entered" },
260 { "rx_frame_too_long_errors" },
261 { "rx_jabbers" },
262 { "rx_undersize_packets" },
263 { "rx_in_length_errors" },
264 { "rx_out_length_errors" },
265 { "rx_64_or_less_octet_packets" },
266 { "rx_65_to_127_octet_packets" },
267 { "rx_128_to_255_octet_packets" },
268 { "rx_256_to_511_octet_packets" },
269 { "rx_512_to_1023_octet_packets" },
270 { "rx_1024_to_1522_octet_packets" },
271 { "rx_1523_to_2047_octet_packets" },
272 { "rx_2048_to_4095_octet_packets" },
273 { "rx_4096_to_8191_octet_packets" },
274 { "rx_8192_to_9022_octet_packets" },
275
276 { "tx_octets" },
277 { "tx_collisions" },
278
279 { "tx_xon_sent" },
280 { "tx_xoff_sent" },
281 { "tx_flow_control" },
282 { "tx_mac_errors" },
283 { "tx_single_collisions" },
284 { "tx_mult_collisions" },
285 { "tx_deferred" },
286 { "tx_excessive_collisions" },
287 { "tx_late_collisions" },
288 { "tx_collide_2times" },
289 { "tx_collide_3times" },
290 { "tx_collide_4times" },
291 { "tx_collide_5times" },
292 { "tx_collide_6times" },
293 { "tx_collide_7times" },
294 { "tx_collide_8times" },
295 { "tx_collide_9times" },
296 { "tx_collide_10times" },
297 { "tx_collide_11times" },
298 { "tx_collide_12times" },
299 { "tx_collide_13times" },
300 { "tx_collide_14times" },
301 { "tx_collide_15times" },
302 { "tx_ucast_packets" },
303 { "tx_mcast_packets" },
304 { "tx_bcast_packets" },
305 { "tx_carrier_sense_errors" },
306 { "tx_discards" },
307 { "tx_errors" },
308
309 { "dma_writeq_full" },
310 { "dma_write_prioq_full" },
311 { "rxbds_empty" },
312 { "rx_discards" },
313 { "rx_errors" },
314 { "rx_threshold_hit" },
315
316 { "dma_readq_full" },
317 { "dma_read_prioq_full" },
318 { "tx_comp_queue_full" },
319
320 { "ring_set_send_prod_index" },
321 { "ring_status_update" },
322 { "nic_irqs" },
323 { "nic_avoided_irqs" },
324 { "nic_tx_threshold_hit" }
325};
326
Michael Chan4cafd3f2005-05-29 14:56:34 -0700327static struct {
328 const char string[ETH_GSTRING_LEN];
329} ethtool_test_keys[TG3_NUM_TEST] = {
330 { "nvram test (online) " },
331 { "link test (online) " },
332 { "register test (offline)" },
333 { "memory test (offline)" },
334 { "loopback test (offline)" },
335 { "interrupt test (offline)" },
336};
337
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
339{
Michael Chan68929142005-08-09 20:17:14 -0700340 unsigned long flags;
341
342 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700343 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
344 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
Michael Chan68929142005-08-09 20:17:14 -0700345 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700346}
347
348static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
349{
350 writel(val, tp->regs + off);
351 readl(tp->regs + off);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352}
353
Michael Chan68929142005-08-09 20:17:14 -0700354static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
355{
356 unsigned long flags;
357 u32 val;
358
359 spin_lock_irqsave(&tp->indirect_lock, flags);
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
362 spin_unlock_irqrestore(&tp->indirect_lock, flags);
363 return val;
364}
365
366static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
367{
368 unsigned long flags;
369
370 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
371 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
372 TG3_64BIT_REG_LOW, val);
373 return;
374 }
375 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
376 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
377 TG3_64BIT_REG_LOW, val);
378 return;
379 }
380
381 spin_lock_irqsave(&tp->indirect_lock, flags);
382 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
383 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
384 spin_unlock_irqrestore(&tp->indirect_lock, flags);
385
386 /* In indirect mode when disabling interrupts, we also need
387 * to clear the interrupt bit in the GRC local ctrl register.
388 */
389 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
390 (val == 0x1)) {
391 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
392 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
393 }
394}
395
396static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
397{
398 unsigned long flags;
399 u32 val;
400
401 spin_lock_irqsave(&tp->indirect_lock, flags);
402 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
403 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
404 spin_unlock_irqrestore(&tp->indirect_lock, flags);
405 return val;
406}
407
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
409{
Michael Chan68929142005-08-09 20:17:14 -0700410 tp->write32(tp, off, val);
411 if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
412 !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
413 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
414 tp->read32(tp, off); /* flush */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415}
416
Michael Chan09ee9292005-08-09 20:17:00 -0700417static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
418{
419 tp->write32_mbox(tp, off, val);
Michael Chan68929142005-08-09 20:17:14 -0700420 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
421 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
422 tp->read32_mbox(tp, off);
Michael Chan09ee9292005-08-09 20:17:00 -0700423}
424
Michael Chan20094932005-08-09 20:16:32 -0700425static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426{
427 void __iomem *mbox = tp->regs + off;
428 writel(val, mbox);
429 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
430 writel(val, mbox);
431 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
432 readl(mbox);
433}
434
Michael Chan20094932005-08-09 20:16:32 -0700435static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
436{
437 writel(val, tp->regs + off);
438}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700439
Michael Chan20094932005-08-09 20:16:32 -0700440static u32 tg3_read32(struct tg3 *tp, u32 off)
441{
442 return (readl(tp->regs + off));
443}
444
445#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700446#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
Michael Chan20094932005-08-09 20:16:32 -0700447#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
448#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700449#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
Michael Chan20094932005-08-09 20:16:32 -0700450
451#define tw32(reg,val) tp->write32(tp, reg, val)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
Michael Chan20094932005-08-09 20:16:32 -0700453#define tr32(reg) tp->read32(tp, reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454
455static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
456{
Michael Chan68929142005-08-09 20:17:14 -0700457 unsigned long flags;
458
459 spin_lock_irqsave(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700460 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
461 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
462
463 /* Always leave this as zero. */
464 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
Michael Chan68929142005-08-09 20:17:14 -0700465 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466}
467
468static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
469{
Michael Chan68929142005-08-09 20:17:14 -0700470 unsigned long flags;
471
472 spin_lock_irqsave(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
474 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
475
476 /* Always leave this as zero. */
477 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
Michael Chan68929142005-08-09 20:17:14 -0700478 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479}
480
481static void tg3_disable_ints(struct tg3 *tp)
482{
483 tw32(TG3PCI_MISC_HOST_CTRL,
484 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
Michael Chan09ee9292005-08-09 20:17:00 -0700485 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486}
487
488static inline void tg3_cond_int(struct tg3 *tp)
489{
490 if (tp->hw_status->status & SD_STATUS_UPDATED)
491 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
492}
493
494static void tg3_enable_ints(struct tg3 *tp)
495{
Michael Chanbbe832c2005-06-24 20:20:04 -0700496 tp->irq_sync = 0;
497 wmb();
498
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499 tw32(TG3PCI_MISC_HOST_CTRL,
500 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
Michael Chan09ee9292005-08-09 20:17:00 -0700501 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
502 (tp->last_tag << 24));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 tg3_cond_int(tp);
504}
505
Michael Chan04237dd2005-04-25 15:17:17 -0700506static inline unsigned int tg3_has_work(struct tg3 *tp)
507{
508 struct tg3_hw_status *sblk = tp->hw_status;
509 unsigned int work_exists = 0;
510
511 /* check for phy events */
512 if (!(tp->tg3_flags &
513 (TG3_FLAG_USE_LINKCHG_REG |
514 TG3_FLAG_POLL_SERDES))) {
515 if (sblk->status & SD_STATUS_LINK_CHG)
516 work_exists = 1;
517 }
518 /* check for RX/TX work to do */
519 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
520 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
521 work_exists = 1;
522
523 return work_exists;
524}
525
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526/* tg3_restart_ints
Michael Chan04237dd2005-04-25 15:17:17 -0700527 * similar to tg3_enable_ints, but it accurately determines whether there
528 * is new work pending and can return without flushing the PIO write
529 * which reenables interrupts
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530 */
531static void tg3_restart_ints(struct tg3 *tp)
532{
David S. Millerfac9b832005-05-18 22:46:34 -0700533 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
534 tp->last_tag << 24);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700535 mmiowb();
536
David S. Millerfac9b832005-05-18 22:46:34 -0700537 /* When doing tagged status, this work check is unnecessary.
538 * The last_tag we write above tells the chip which piece of
539 * work we've completed.
540 */
541 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
542 tg3_has_work(tp))
Michael Chan04237dd2005-04-25 15:17:17 -0700543 tw32(HOSTCC_MODE, tp->coalesce_mode |
544 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545}
546
547static inline void tg3_netif_stop(struct tg3 *tp)
548{
Michael Chanbbe832c2005-06-24 20:20:04 -0700549 tp->dev->trans_start = jiffies; /* prevent tx timeout */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 netif_poll_disable(tp->dev);
551 netif_tx_disable(tp->dev);
552}
553
554static inline void tg3_netif_start(struct tg3 *tp)
555{
556 netif_wake_queue(tp->dev);
557 /* NOTE: unconditional netif_wake_queue is only appropriate
558 * so long as all callers are assured to have free tx slots
559 * (such as after tg3_init_hw)
560 */
561 netif_poll_enable(tp->dev);
David S. Millerf47c11e2005-06-24 20:18:35 -0700562 tp->hw_status->status |= SD_STATUS_UPDATED;
563 tg3_enable_ints(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564}
565
566static void tg3_switch_clocks(struct tg3 *tp)
567{
568 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
569 u32 orig_clock_ctrl;
570
Michael Chan4cf78e42005-07-25 12:29:19 -0700571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
572 return;
573
Linus Torvalds1da177e2005-04-16 15:20:36 -0700574 orig_clock_ctrl = clock_ctrl;
575 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
576 CLOCK_CTRL_CLKRUN_OENABLE |
577 0x1f);
578 tp->pci_clock_ctrl = clock_ctrl;
579
580 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
581 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
582 tw32_f(TG3PCI_CLOCK_CTRL,
583 clock_ctrl | CLOCK_CTRL_625_CORE);
584 udelay(40);
585 }
586 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
587 tw32_f(TG3PCI_CLOCK_CTRL,
588 clock_ctrl |
589 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
590 udelay(40);
591 tw32_f(TG3PCI_CLOCK_CTRL,
592 clock_ctrl | (CLOCK_CTRL_ALTCLK));
593 udelay(40);
594 }
595 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
596 udelay(40);
597}
598
599#define PHY_BUSY_LOOPS 5000
600
601static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
602{
603 u32 frame_val;
604 unsigned int loops;
605 int ret;
606
607 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
608 tw32_f(MAC_MI_MODE,
609 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
610 udelay(80);
611 }
612
613 *val = 0x0;
614
615 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
616 MI_COM_PHY_ADDR_MASK);
617 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
618 MI_COM_REG_ADDR_MASK);
619 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
620
621 tw32_f(MAC_MI_COM, frame_val);
622
623 loops = PHY_BUSY_LOOPS;
624 while (loops != 0) {
625 udelay(10);
626 frame_val = tr32(MAC_MI_COM);
627
628 if ((frame_val & MI_COM_BUSY) == 0) {
629 udelay(5);
630 frame_val = tr32(MAC_MI_COM);
631 break;
632 }
633 loops -= 1;
634 }
635
636 ret = -EBUSY;
637 if (loops != 0) {
638 *val = frame_val & MI_COM_DATA_MASK;
639 ret = 0;
640 }
641
642 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
643 tw32_f(MAC_MI_MODE, tp->mi_mode);
644 udelay(80);
645 }
646
647 return ret;
648}
649
650static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
651{
652 u32 frame_val;
653 unsigned int loops;
654 int ret;
655
656 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
657 tw32_f(MAC_MI_MODE,
658 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
659 udelay(80);
660 }
661
662 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
663 MI_COM_PHY_ADDR_MASK);
664 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
665 MI_COM_REG_ADDR_MASK);
666 frame_val |= (val & MI_COM_DATA_MASK);
667 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
668
669 tw32_f(MAC_MI_COM, frame_val);
670
671 loops = PHY_BUSY_LOOPS;
672 while (loops != 0) {
673 udelay(10);
674 frame_val = tr32(MAC_MI_COM);
675 if ((frame_val & MI_COM_BUSY) == 0) {
676 udelay(5);
677 frame_val = tr32(MAC_MI_COM);
678 break;
679 }
680 loops -= 1;
681 }
682
683 ret = -EBUSY;
684 if (loops != 0)
685 ret = 0;
686
687 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
688 tw32_f(MAC_MI_MODE, tp->mi_mode);
689 udelay(80);
690 }
691
692 return ret;
693}
694
695static void tg3_phy_set_wirespeed(struct tg3 *tp)
696{
697 u32 val;
698
699 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
700 return;
701
702 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
703 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
704 tg3_writephy(tp, MII_TG3_AUX_CTRL,
705 (val | (1 << 15) | (1 << 4)));
706}
707
708static int tg3_bmcr_reset(struct tg3 *tp)
709{
710 u32 phy_control;
711 int limit, err;
712
713 /* OK, reset it, and poll the BMCR_RESET bit until it
714 * clears or we time out.
715 */
716 phy_control = BMCR_RESET;
717 err = tg3_writephy(tp, MII_BMCR, phy_control);
718 if (err != 0)
719 return -EBUSY;
720
721 limit = 5000;
722 while (limit--) {
723 err = tg3_readphy(tp, MII_BMCR, &phy_control);
724 if (err != 0)
725 return -EBUSY;
726
727 if ((phy_control & BMCR_RESET) == 0) {
728 udelay(40);
729 break;
730 }
731 udelay(10);
732 }
733 if (limit <= 0)
734 return -EBUSY;
735
736 return 0;
737}
738
739static int tg3_wait_macro_done(struct tg3 *tp)
740{
741 int limit = 100;
742
743 while (limit--) {
744 u32 tmp32;
745
746 if (!tg3_readphy(tp, 0x16, &tmp32)) {
747 if ((tmp32 & 0x1000) == 0)
748 break;
749 }
750 }
751 if (limit <= 0)
752 return -EBUSY;
753
754 return 0;
755}
756
757static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
758{
759 static const u32 test_pat[4][6] = {
760 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
761 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
762 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
763 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
764 };
765 int chan;
766
767 for (chan = 0; chan < 4; chan++) {
768 int i;
769
770 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
771 (chan * 0x2000) | 0x0200);
772 tg3_writephy(tp, 0x16, 0x0002);
773
774 for (i = 0; i < 6; i++)
775 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
776 test_pat[chan][i]);
777
778 tg3_writephy(tp, 0x16, 0x0202);
779 if (tg3_wait_macro_done(tp)) {
780 *resetp = 1;
781 return -EBUSY;
782 }
783
784 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
785 (chan * 0x2000) | 0x0200);
786 tg3_writephy(tp, 0x16, 0x0082);
787 if (tg3_wait_macro_done(tp)) {
788 *resetp = 1;
789 return -EBUSY;
790 }
791
792 tg3_writephy(tp, 0x16, 0x0802);
793 if (tg3_wait_macro_done(tp)) {
794 *resetp = 1;
795 return -EBUSY;
796 }
797
798 for (i = 0; i < 6; i += 2) {
799 u32 low, high;
800
801 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
802 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
803 tg3_wait_macro_done(tp)) {
804 *resetp = 1;
805 return -EBUSY;
806 }
807 low &= 0x7fff;
808 high &= 0x000f;
809 if (low != test_pat[chan][i] ||
810 high != test_pat[chan][i+1]) {
811 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
812 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
813 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
814
815 return -EBUSY;
816 }
817 }
818 }
819
820 return 0;
821}
822
823static int tg3_phy_reset_chanpat(struct tg3 *tp)
824{
825 int chan;
826
827 for (chan = 0; chan < 4; chan++) {
828 int i;
829
830 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
831 (chan * 0x2000) | 0x0200);
832 tg3_writephy(tp, 0x16, 0x0002);
833 for (i = 0; i < 6; i++)
834 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
835 tg3_writephy(tp, 0x16, 0x0202);
836 if (tg3_wait_macro_done(tp))
837 return -EBUSY;
838 }
839
840 return 0;
841}
842
843static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
844{
845 u32 reg32, phy9_orig;
846 int retries, do_phy_reset, err;
847
848 retries = 10;
849 do_phy_reset = 1;
850 do {
851 if (do_phy_reset) {
852 err = tg3_bmcr_reset(tp);
853 if (err)
854 return err;
855 do_phy_reset = 0;
856 }
857
858 /* Disable transmitter and interrupt. */
859 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
860 continue;
861
862 reg32 |= 0x3000;
863 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
864
865 /* Set full-duplex, 1000 mbps. */
866 tg3_writephy(tp, MII_BMCR,
867 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
868
869 /* Set to master mode. */
870 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
871 continue;
872
873 tg3_writephy(tp, MII_TG3_CTRL,
874 (MII_TG3_CTRL_AS_MASTER |
875 MII_TG3_CTRL_ENABLE_AS_MASTER));
876
877 /* Enable SM_DSP_CLOCK and 6dB. */
878 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
879
880 /* Block the PHY control access. */
881 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
882 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
883
884 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
885 if (!err)
886 break;
887 } while (--retries);
888
889 err = tg3_phy_reset_chanpat(tp);
890 if (err)
891 return err;
892
893 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
894 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
895
896 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
897 tg3_writephy(tp, 0x16, 0x0000);
898
899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
901 /* Set Extended packet length bit for jumbo frames */
902 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
903 }
904 else {
905 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
906 }
907
908 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
909
910 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
911 reg32 &= ~0x3000;
912 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
913 } else if (!err)
914 err = -EBUSY;
915
916 return err;
917}
918
919/* This will reset the tigon3 PHY if there is no valid
920 * link unless the FORCE argument is non-zero.
921 */
922static int tg3_phy_reset(struct tg3 *tp)
923{
924 u32 phy_status;
925 int err;
926
927 err = tg3_readphy(tp, MII_BMSR, &phy_status);
928 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
929 if (err != 0)
930 return -EBUSY;
931
932 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
933 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
935 err = tg3_phy_reset_5703_4_5(tp);
936 if (err)
937 return err;
938 goto out;
939 }
940
941 err = tg3_bmcr_reset(tp);
942 if (err)
943 return err;
944
945out:
946 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
947 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
948 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
949 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
950 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
951 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
952 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
953 }
954 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
955 tg3_writephy(tp, 0x1c, 0x8d68);
956 tg3_writephy(tp, 0x1c, 0x8d68);
957 }
958 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
959 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
960 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
961 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
962 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
963 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
964 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
965 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
966 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
967 }
968 /* Set Extended packet length bit (bit 14) on all chips that */
969 /* support jumbo frames */
970 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
971 /* Cannot do read-modify-write on 5401 */
972 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -0700973 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 u32 phy_reg;
975
976 /* Set bit 14 with read-modify-write to preserve other bits */
977 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
978 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
979 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
980 }
981
982 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
983 * jumbo frames transmission.
984 */
Michael Chan0f893dc2005-07-25 12:30:38 -0700985 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 u32 phy_reg;
987
988 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
989 tg3_writephy(tp, MII_TG3_EXT_CTRL,
990 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
991 }
992
993 tg3_phy_set_wirespeed(tp);
994 return 0;
995}
996
997static void tg3_frob_aux_power(struct tg3 *tp)
998{
999 struct tg3 *tp_peer = tp;
1000
1001 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1002 return;
1003
1004 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1005 tp_peer = pci_get_drvdata(tp->pdev_peer);
1006 if (!tp_peer)
1007 BUG();
1008 }
1009
1010
1011 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1012 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
1013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1015 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1016 (GRC_LCLCTRL_GPIO_OE0 |
1017 GRC_LCLCTRL_GPIO_OE1 |
1018 GRC_LCLCTRL_GPIO_OE2 |
1019 GRC_LCLCTRL_GPIO_OUTPUT0 |
1020 GRC_LCLCTRL_GPIO_OUTPUT1));
1021 udelay(100);
1022 } else {
1023 u32 no_gpio2;
1024 u32 grc_local_ctrl;
1025
1026 if (tp_peer != tp &&
1027 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1028 return;
1029
1030 /* On 5753 and variants, GPIO2 cannot be used. */
1031 no_gpio2 = tp->nic_sram_data_cfg &
1032 NIC_SRAM_DATA_CFG_NO_GPIO2;
1033
1034 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1035 GRC_LCLCTRL_GPIO_OE1 |
1036 GRC_LCLCTRL_GPIO_OE2 |
1037 GRC_LCLCTRL_GPIO_OUTPUT1 |
1038 GRC_LCLCTRL_GPIO_OUTPUT2;
1039 if (no_gpio2) {
1040 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1041 GRC_LCLCTRL_GPIO_OUTPUT2);
1042 }
1043 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1044 grc_local_ctrl);
1045 udelay(100);
1046
1047 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1048
1049 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1050 grc_local_ctrl);
1051 udelay(100);
1052
1053 if (!no_gpio2) {
1054 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1055 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1056 grc_local_ctrl);
1057 udelay(100);
1058 }
1059 }
1060 } else {
1061 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1062 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1063 if (tp_peer != tp &&
1064 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1065 return;
1066
1067 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1068 (GRC_LCLCTRL_GPIO_OE1 |
1069 GRC_LCLCTRL_GPIO_OUTPUT1));
1070 udelay(100);
1071
1072 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1073 (GRC_LCLCTRL_GPIO_OE1));
1074 udelay(100);
1075
1076 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1077 (GRC_LCLCTRL_GPIO_OE1 |
1078 GRC_LCLCTRL_GPIO_OUTPUT1));
1079 udelay(100);
1080 }
1081 }
1082}
1083
1084static int tg3_setup_phy(struct tg3 *, int);
1085
1086#define RESET_KIND_SHUTDOWN 0
1087#define RESET_KIND_INIT 1
1088#define RESET_KIND_SUSPEND 2
1089
1090static void tg3_write_sig_post_reset(struct tg3 *, int);
1091static int tg3_halt_cpu(struct tg3 *, u32);
1092
1093static int tg3_set_power_state(struct tg3 *tp, int state)
1094{
1095 u32 misc_host_ctrl;
1096 u16 power_control, power_caps;
1097 int pm = tp->pm_cap;
1098
1099 /* Make sure register accesses (indirect or otherwise)
1100 * will function correctly.
1101 */
1102 pci_write_config_dword(tp->pdev,
1103 TG3PCI_MISC_HOST_CTRL,
1104 tp->misc_host_ctrl);
1105
1106 pci_read_config_word(tp->pdev,
1107 pm + PCI_PM_CTRL,
1108 &power_control);
1109 power_control |= PCI_PM_CTRL_PME_STATUS;
1110 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1111 switch (state) {
1112 case 0:
1113 power_control |= 0;
1114 pci_write_config_word(tp->pdev,
1115 pm + PCI_PM_CTRL,
1116 power_control);
Michael Chan8c6bda12005-04-21 17:09:08 -07001117 udelay(100); /* Delay after power state change */
1118
1119 /* Switch out of Vaux if it is not a LOM */
1120 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1121 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1122 udelay(100);
1123 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124
1125 return 0;
1126
1127 case 1:
1128 power_control |= 1;
1129 break;
1130
1131 case 2:
1132 power_control |= 2;
1133 break;
1134
1135 case 3:
1136 power_control |= 3;
1137 break;
1138
1139 default:
1140 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1141 "requested.\n",
1142 tp->dev->name, state);
1143 return -EINVAL;
1144 };
1145
1146 power_control |= PCI_PM_CTRL_PME_ENABLE;
1147
1148 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1149 tw32(TG3PCI_MISC_HOST_CTRL,
1150 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1151
1152 if (tp->link_config.phy_is_low_power == 0) {
1153 tp->link_config.phy_is_low_power = 1;
1154 tp->link_config.orig_speed = tp->link_config.speed;
1155 tp->link_config.orig_duplex = tp->link_config.duplex;
1156 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1157 }
1158
Michael Chan747e8f82005-07-25 12:33:22 -07001159 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 tp->link_config.speed = SPEED_10;
1161 tp->link_config.duplex = DUPLEX_HALF;
1162 tp->link_config.autoneg = AUTONEG_ENABLE;
1163 tg3_setup_phy(tp, 0);
1164 }
1165
1166 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1167
1168 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1169 u32 mac_mode;
1170
1171 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1172 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1173 udelay(40);
1174
1175 mac_mode = MAC_MODE_PORT_MODE_MII;
1176
1177 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1178 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1179 mac_mode |= MAC_MODE_LINK_POLARITY;
1180 } else {
1181 mac_mode = MAC_MODE_PORT_MODE_TBI;
1182 }
1183
John W. Linvillecbf46852005-04-21 17:01:29 -07001184 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 tw32(MAC_LED_CTRL, tp->led_ctrl);
1186
1187 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1188 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1189 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1190
1191 tw32_f(MAC_MODE, mac_mode);
1192 udelay(100);
1193
1194 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1195 udelay(10);
1196 }
1197
1198 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1199 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1200 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1201 u32 base_val;
1202
1203 base_val = tp->pci_clock_ctrl;
1204 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1205 CLOCK_CTRL_TXCLK_DISABLE);
1206
1207 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1208 CLOCK_CTRL_ALTCLK |
1209 CLOCK_CTRL_PWRDOWN_PLL133);
1210 udelay(40);
Michael Chan4cf78e42005-07-25 12:29:19 -07001211 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1212 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07001213 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1215 u32 newbits1, newbits2;
1216
1217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1219 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1220 CLOCK_CTRL_TXCLK_DISABLE |
1221 CLOCK_CTRL_ALTCLK);
1222 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1223 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1224 newbits1 = CLOCK_CTRL_625_CORE;
1225 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1226 } else {
1227 newbits1 = CLOCK_CTRL_ALTCLK;
1228 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1229 }
1230
1231 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1232 udelay(40);
1233
1234 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1235 udelay(40);
1236
1237 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1238 u32 newbits3;
1239
1240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1242 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1243 CLOCK_CTRL_TXCLK_DISABLE |
1244 CLOCK_CTRL_44MHZ_CORE);
1245 } else {
1246 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1247 }
1248
1249 tw32_f(TG3PCI_CLOCK_CTRL,
1250 tp->pci_clock_ctrl | newbits3);
1251 udelay(40);
1252 }
1253 }
1254
1255 tg3_frob_aux_power(tp);
1256
1257 /* Workaround for unstable PLL clock */
1258 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1259 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1260 u32 val = tr32(0x7d00);
1261
1262 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1263 tw32(0x7d00, val);
1264 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1265 tg3_halt_cpu(tp, RX_CPU_BASE);
1266 }
1267
1268 /* Finally, set the new power state. */
1269 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
Michael Chan8c6bda12005-04-21 17:09:08 -07001270 udelay(100); /* Delay after power state change */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271
1272 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1273
1274 return 0;
1275}
1276
1277static void tg3_link_report(struct tg3 *tp)
1278{
1279 if (!netif_carrier_ok(tp->dev)) {
1280 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1281 } else {
1282 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1283 tp->dev->name,
1284 (tp->link_config.active_speed == SPEED_1000 ?
1285 1000 :
1286 (tp->link_config.active_speed == SPEED_100 ?
1287 100 : 10)),
1288 (tp->link_config.active_duplex == DUPLEX_FULL ?
1289 "full" : "half"));
1290
1291 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1292 "%s for RX.\n",
1293 tp->dev->name,
1294 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1295 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1296 }
1297}
1298
1299static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1300{
1301 u32 new_tg3_flags = 0;
1302 u32 old_rx_mode = tp->rx_mode;
1303 u32 old_tx_mode = tp->tx_mode;
1304
1305 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
Michael Chan747e8f82005-07-25 12:33:22 -07001306
1307 /* Convert 1000BaseX flow control bits to 1000BaseT
1308 * bits before resolving flow control.
1309 */
1310 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1311 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1312 ADVERTISE_PAUSE_ASYM);
1313 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1314
1315 if (local_adv & ADVERTISE_1000XPAUSE)
1316 local_adv |= ADVERTISE_PAUSE_CAP;
1317 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1318 local_adv |= ADVERTISE_PAUSE_ASYM;
1319 if (remote_adv & LPA_1000XPAUSE)
1320 remote_adv |= LPA_PAUSE_CAP;
1321 if (remote_adv & LPA_1000XPAUSE_ASYM)
1322 remote_adv |= LPA_PAUSE_ASYM;
1323 }
1324
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 if (local_adv & ADVERTISE_PAUSE_CAP) {
1326 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1327 if (remote_adv & LPA_PAUSE_CAP)
1328 new_tg3_flags |=
1329 (TG3_FLAG_RX_PAUSE |
1330 TG3_FLAG_TX_PAUSE);
1331 else if (remote_adv & LPA_PAUSE_ASYM)
1332 new_tg3_flags |=
1333 (TG3_FLAG_RX_PAUSE);
1334 } else {
1335 if (remote_adv & LPA_PAUSE_CAP)
1336 new_tg3_flags |=
1337 (TG3_FLAG_RX_PAUSE |
1338 TG3_FLAG_TX_PAUSE);
1339 }
1340 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1341 if ((remote_adv & LPA_PAUSE_CAP) &&
1342 (remote_adv & LPA_PAUSE_ASYM))
1343 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1344 }
1345
1346 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1347 tp->tg3_flags |= new_tg3_flags;
1348 } else {
1349 new_tg3_flags = tp->tg3_flags;
1350 }
1351
1352 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1353 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1354 else
1355 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1356
1357 if (old_rx_mode != tp->rx_mode) {
1358 tw32_f(MAC_RX_MODE, tp->rx_mode);
1359 }
1360
1361 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1362 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1363 else
1364 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1365
1366 if (old_tx_mode != tp->tx_mode) {
1367 tw32_f(MAC_TX_MODE, tp->tx_mode);
1368 }
1369}
1370
1371static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1372{
1373 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1374 case MII_TG3_AUX_STAT_10HALF:
1375 *speed = SPEED_10;
1376 *duplex = DUPLEX_HALF;
1377 break;
1378
1379 case MII_TG3_AUX_STAT_10FULL:
1380 *speed = SPEED_10;
1381 *duplex = DUPLEX_FULL;
1382 break;
1383
1384 case MII_TG3_AUX_STAT_100HALF:
1385 *speed = SPEED_100;
1386 *duplex = DUPLEX_HALF;
1387 break;
1388
1389 case MII_TG3_AUX_STAT_100FULL:
1390 *speed = SPEED_100;
1391 *duplex = DUPLEX_FULL;
1392 break;
1393
1394 case MII_TG3_AUX_STAT_1000HALF:
1395 *speed = SPEED_1000;
1396 *duplex = DUPLEX_HALF;
1397 break;
1398
1399 case MII_TG3_AUX_STAT_1000FULL:
1400 *speed = SPEED_1000;
1401 *duplex = DUPLEX_FULL;
1402 break;
1403
1404 default:
1405 *speed = SPEED_INVALID;
1406 *duplex = DUPLEX_INVALID;
1407 break;
1408 };
1409}
1410
1411static void tg3_phy_copper_begin(struct tg3 *tp)
1412{
1413 u32 new_adv;
1414 int i;
1415
1416 if (tp->link_config.phy_is_low_power) {
1417 /* Entering low power mode. Disable gigabit and
1418 * 100baseT advertisements.
1419 */
1420 tg3_writephy(tp, MII_TG3_CTRL, 0);
1421
1422 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1423 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1424 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1425 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1426
1427 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1428 } else if (tp->link_config.speed == SPEED_INVALID) {
1429 tp->link_config.advertising =
1430 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1431 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1432 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1433 ADVERTISED_Autoneg | ADVERTISED_MII);
1434
1435 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1436 tp->link_config.advertising &=
1437 ~(ADVERTISED_1000baseT_Half |
1438 ADVERTISED_1000baseT_Full);
1439
1440 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1441 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1442 new_adv |= ADVERTISE_10HALF;
1443 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1444 new_adv |= ADVERTISE_10FULL;
1445 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1446 new_adv |= ADVERTISE_100HALF;
1447 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1448 new_adv |= ADVERTISE_100FULL;
1449 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1450
1451 if (tp->link_config.advertising &
1452 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1453 new_adv = 0;
1454 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1455 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1456 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1457 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1458 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1459 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1460 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1461 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1462 MII_TG3_CTRL_ENABLE_AS_MASTER);
1463 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1464 } else {
1465 tg3_writephy(tp, MII_TG3_CTRL, 0);
1466 }
1467 } else {
1468 /* Asking for a specific link mode. */
1469 if (tp->link_config.speed == SPEED_1000) {
1470 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1471 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1472
1473 if (tp->link_config.duplex == DUPLEX_FULL)
1474 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1475 else
1476 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1477 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1478 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1479 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1480 MII_TG3_CTRL_ENABLE_AS_MASTER);
1481 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1482 } else {
1483 tg3_writephy(tp, MII_TG3_CTRL, 0);
1484
1485 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1486 if (tp->link_config.speed == SPEED_100) {
1487 if (tp->link_config.duplex == DUPLEX_FULL)
1488 new_adv |= ADVERTISE_100FULL;
1489 else
1490 new_adv |= ADVERTISE_100HALF;
1491 } else {
1492 if (tp->link_config.duplex == DUPLEX_FULL)
1493 new_adv |= ADVERTISE_10FULL;
1494 else
1495 new_adv |= ADVERTISE_10HALF;
1496 }
1497 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1498 }
1499 }
1500
1501 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1502 tp->link_config.speed != SPEED_INVALID) {
1503 u32 bmcr, orig_bmcr;
1504
1505 tp->link_config.active_speed = tp->link_config.speed;
1506 tp->link_config.active_duplex = tp->link_config.duplex;
1507
1508 bmcr = 0;
1509 switch (tp->link_config.speed) {
1510 default:
1511 case SPEED_10:
1512 break;
1513
1514 case SPEED_100:
1515 bmcr |= BMCR_SPEED100;
1516 break;
1517
1518 case SPEED_1000:
1519 bmcr |= TG3_BMCR_SPEED1000;
1520 break;
1521 };
1522
1523 if (tp->link_config.duplex == DUPLEX_FULL)
1524 bmcr |= BMCR_FULLDPLX;
1525
1526 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1527 (bmcr != orig_bmcr)) {
1528 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1529 for (i = 0; i < 1500; i++) {
1530 u32 tmp;
1531
1532 udelay(10);
1533 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1534 tg3_readphy(tp, MII_BMSR, &tmp))
1535 continue;
1536 if (!(tmp & BMSR_LSTATUS)) {
1537 udelay(40);
1538 break;
1539 }
1540 }
1541 tg3_writephy(tp, MII_BMCR, bmcr);
1542 udelay(40);
1543 }
1544 } else {
1545 tg3_writephy(tp, MII_BMCR,
1546 BMCR_ANENABLE | BMCR_ANRESTART);
1547 }
1548}
1549
1550static int tg3_init_5401phy_dsp(struct tg3 *tp)
1551{
1552 int err;
1553
1554 /* Turn off tap power management. */
1555 /* Set Extended packet length bit */
1556 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1557
1558 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1559 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1560
1561 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1562 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1563
1564 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1565 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1566
1567 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1568 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1569
1570 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1571 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1572
1573 udelay(40);
1574
1575 return err;
1576}
1577
1578static int tg3_copper_is_advertising_all(struct tg3 *tp)
1579{
1580 u32 adv_reg, all_mask;
1581
1582 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1583 return 0;
1584
1585 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1586 ADVERTISE_100HALF | ADVERTISE_100FULL);
1587 if ((adv_reg & all_mask) != all_mask)
1588 return 0;
1589 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1590 u32 tg3_ctrl;
1591
1592 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1593 return 0;
1594
1595 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1596 MII_TG3_CTRL_ADV_1000_FULL);
1597 if ((tg3_ctrl & all_mask) != all_mask)
1598 return 0;
1599 }
1600 return 1;
1601}
1602
1603static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1604{
1605 int current_link_up;
1606 u32 bmsr, dummy;
1607 u16 current_speed;
1608 u8 current_duplex;
1609 int i, err;
1610
1611 tw32(MAC_EVENT, 0);
1612
1613 tw32_f(MAC_STATUS,
1614 (MAC_STATUS_SYNC_CHANGED |
1615 MAC_STATUS_CFG_CHANGED |
1616 MAC_STATUS_MI_COMPLETION |
1617 MAC_STATUS_LNKSTATE_CHANGED));
1618 udelay(40);
1619
1620 tp->mi_mode = MAC_MI_MODE_BASE;
1621 tw32_f(MAC_MI_MODE, tp->mi_mode);
1622 udelay(80);
1623
1624 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1625
1626 /* Some third-party PHYs need to be reset on link going
1627 * down.
1628 */
1629 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1632 netif_carrier_ok(tp->dev)) {
1633 tg3_readphy(tp, MII_BMSR, &bmsr);
1634 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1635 !(bmsr & BMSR_LSTATUS))
1636 force_reset = 1;
1637 }
1638 if (force_reset)
1639 tg3_phy_reset(tp);
1640
1641 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1642 tg3_readphy(tp, MII_BMSR, &bmsr);
1643 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1644 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1645 bmsr = 0;
1646
1647 if (!(bmsr & BMSR_LSTATUS)) {
1648 err = tg3_init_5401phy_dsp(tp);
1649 if (err)
1650 return err;
1651
1652 tg3_readphy(tp, MII_BMSR, &bmsr);
1653 for (i = 0; i < 1000; i++) {
1654 udelay(10);
1655 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1656 (bmsr & BMSR_LSTATUS)) {
1657 udelay(40);
1658 break;
1659 }
1660 }
1661
1662 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1663 !(bmsr & BMSR_LSTATUS) &&
1664 tp->link_config.active_speed == SPEED_1000) {
1665 err = tg3_phy_reset(tp);
1666 if (!err)
1667 err = tg3_init_5401phy_dsp(tp);
1668 if (err)
1669 return err;
1670 }
1671 }
1672 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1673 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1674 /* 5701 {A0,B0} CRC bug workaround */
1675 tg3_writephy(tp, 0x15, 0x0a75);
1676 tg3_writephy(tp, 0x1c, 0x8c68);
1677 tg3_writephy(tp, 0x1c, 0x8d68);
1678 tg3_writephy(tp, 0x1c, 0x8c68);
1679 }
1680
1681 /* Clear pending interrupts... */
1682 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1683 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1684
1685 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1686 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1687 else
1688 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1689
1690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1692 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1693 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1694 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1695 else
1696 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1697 }
1698
1699 current_link_up = 0;
1700 current_speed = SPEED_INVALID;
1701 current_duplex = DUPLEX_INVALID;
1702
1703 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1704 u32 val;
1705
1706 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1707 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1708 if (!(val & (1 << 10))) {
1709 val |= (1 << 10);
1710 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1711 goto relink;
1712 }
1713 }
1714
1715 bmsr = 0;
1716 for (i = 0; i < 100; i++) {
1717 tg3_readphy(tp, MII_BMSR, &bmsr);
1718 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1719 (bmsr & BMSR_LSTATUS))
1720 break;
1721 udelay(40);
1722 }
1723
1724 if (bmsr & BMSR_LSTATUS) {
1725 u32 aux_stat, bmcr;
1726
1727 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1728 for (i = 0; i < 2000; i++) {
1729 udelay(10);
1730 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1731 aux_stat)
1732 break;
1733 }
1734
1735 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1736 &current_speed,
1737 &current_duplex);
1738
1739 bmcr = 0;
1740 for (i = 0; i < 200; i++) {
1741 tg3_readphy(tp, MII_BMCR, &bmcr);
1742 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1743 continue;
1744 if (bmcr && bmcr != 0x7fff)
1745 break;
1746 udelay(10);
1747 }
1748
1749 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1750 if (bmcr & BMCR_ANENABLE) {
1751 current_link_up = 1;
1752
1753 /* Force autoneg restart if we are exiting
1754 * low power mode.
1755 */
1756 if (!tg3_copper_is_advertising_all(tp))
1757 current_link_up = 0;
1758 } else {
1759 current_link_up = 0;
1760 }
1761 } else {
1762 if (!(bmcr & BMCR_ANENABLE) &&
1763 tp->link_config.speed == current_speed &&
1764 tp->link_config.duplex == current_duplex) {
1765 current_link_up = 1;
1766 } else {
1767 current_link_up = 0;
1768 }
1769 }
1770
1771 tp->link_config.active_speed = current_speed;
1772 tp->link_config.active_duplex = current_duplex;
1773 }
1774
1775 if (current_link_up == 1 &&
1776 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1777 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1778 u32 local_adv, remote_adv;
1779
1780 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1781 local_adv = 0;
1782 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1783
1784 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1785 remote_adv = 0;
1786
1787 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1788
1789 /* If we are not advertising full pause capability,
1790 * something is wrong. Bring the link down and reconfigure.
1791 */
1792 if (local_adv != ADVERTISE_PAUSE_CAP) {
1793 current_link_up = 0;
1794 } else {
1795 tg3_setup_flow_control(tp, local_adv, remote_adv);
1796 }
1797 }
1798relink:
1799 if (current_link_up == 0) {
1800 u32 tmp;
1801
1802 tg3_phy_copper_begin(tp);
1803
1804 tg3_readphy(tp, MII_BMSR, &tmp);
1805 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1806 (tmp & BMSR_LSTATUS))
1807 current_link_up = 1;
1808 }
1809
1810 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1811 if (current_link_up == 1) {
1812 if (tp->link_config.active_speed == SPEED_100 ||
1813 tp->link_config.active_speed == SPEED_10)
1814 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1815 else
1816 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1817 } else
1818 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1819
1820 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1821 if (tp->link_config.active_duplex == DUPLEX_HALF)
1822 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1823
1824 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1826 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1827 (current_link_up == 1 &&
1828 tp->link_config.active_speed == SPEED_10))
1829 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1830 } else {
1831 if (current_link_up == 1)
1832 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1833 }
1834
1835 /* ??? Without this setting Netgear GA302T PHY does not
1836 * ??? send/receive packets...
1837 */
1838 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1839 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1840 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1841 tw32_f(MAC_MI_MODE, tp->mi_mode);
1842 udelay(80);
1843 }
1844
1845 tw32_f(MAC_MODE, tp->mac_mode);
1846 udelay(40);
1847
1848 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1849 /* Polled via timer. */
1850 tw32_f(MAC_EVENT, 0);
1851 } else {
1852 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1853 }
1854 udelay(40);
1855
1856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1857 current_link_up == 1 &&
1858 tp->link_config.active_speed == SPEED_1000 &&
1859 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1860 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1861 udelay(120);
1862 tw32_f(MAC_STATUS,
1863 (MAC_STATUS_SYNC_CHANGED |
1864 MAC_STATUS_CFG_CHANGED));
1865 udelay(40);
1866 tg3_write_mem(tp,
1867 NIC_SRAM_FIRMWARE_MBOX,
1868 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1869 }
1870
1871 if (current_link_up != netif_carrier_ok(tp->dev)) {
1872 if (current_link_up)
1873 netif_carrier_on(tp->dev);
1874 else
1875 netif_carrier_off(tp->dev);
1876 tg3_link_report(tp);
1877 }
1878
1879 return 0;
1880}
1881
1882struct tg3_fiber_aneginfo {
1883 int state;
1884#define ANEG_STATE_UNKNOWN 0
1885#define ANEG_STATE_AN_ENABLE 1
1886#define ANEG_STATE_RESTART_INIT 2
1887#define ANEG_STATE_RESTART 3
1888#define ANEG_STATE_DISABLE_LINK_OK 4
1889#define ANEG_STATE_ABILITY_DETECT_INIT 5
1890#define ANEG_STATE_ABILITY_DETECT 6
1891#define ANEG_STATE_ACK_DETECT_INIT 7
1892#define ANEG_STATE_ACK_DETECT 8
1893#define ANEG_STATE_COMPLETE_ACK_INIT 9
1894#define ANEG_STATE_COMPLETE_ACK 10
1895#define ANEG_STATE_IDLE_DETECT_INIT 11
1896#define ANEG_STATE_IDLE_DETECT 12
1897#define ANEG_STATE_LINK_OK 13
1898#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1899#define ANEG_STATE_NEXT_PAGE_WAIT 15
1900
1901 u32 flags;
1902#define MR_AN_ENABLE 0x00000001
1903#define MR_RESTART_AN 0x00000002
1904#define MR_AN_COMPLETE 0x00000004
1905#define MR_PAGE_RX 0x00000008
1906#define MR_NP_LOADED 0x00000010
1907#define MR_TOGGLE_TX 0x00000020
1908#define MR_LP_ADV_FULL_DUPLEX 0x00000040
1909#define MR_LP_ADV_HALF_DUPLEX 0x00000080
1910#define MR_LP_ADV_SYM_PAUSE 0x00000100
1911#define MR_LP_ADV_ASYM_PAUSE 0x00000200
1912#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1913#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1914#define MR_LP_ADV_NEXT_PAGE 0x00001000
1915#define MR_TOGGLE_RX 0x00002000
1916#define MR_NP_RX 0x00004000
1917
1918#define MR_LINK_OK 0x80000000
1919
1920 unsigned long link_time, cur_time;
1921
1922 u32 ability_match_cfg;
1923 int ability_match_count;
1924
1925 char ability_match, idle_match, ack_match;
1926
1927 u32 txconfig, rxconfig;
1928#define ANEG_CFG_NP 0x00000080
1929#define ANEG_CFG_ACK 0x00000040
1930#define ANEG_CFG_RF2 0x00000020
1931#define ANEG_CFG_RF1 0x00000010
1932#define ANEG_CFG_PS2 0x00000001
1933#define ANEG_CFG_PS1 0x00008000
1934#define ANEG_CFG_HD 0x00004000
1935#define ANEG_CFG_FD 0x00002000
1936#define ANEG_CFG_INVAL 0x00001f06
1937
1938};
1939#define ANEG_OK 0
1940#define ANEG_DONE 1
1941#define ANEG_TIMER_ENAB 2
1942#define ANEG_FAILED -1
1943
1944#define ANEG_STATE_SETTLE_TIME 10000
1945
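/* Software implementation of the 1000BASE-X (clause 37 style) autoneg
 * handshake, driven by fiber_autoneg() below.  The normal progression
 * through the states is roughly:
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART
 *     -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *     -> ACK_DETECT_INIT -> ACK_DETECT
 *     -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *     -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * The *_INIT states program MAC_TX_AUTO_NEG/MAC_MODE; the other states
 * wait either for matching config words from the link partner or for
 * ANEG_STATE_SETTLE_TIME ticks of ap->cur_time to elapse.
 */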
1946static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1947 struct tg3_fiber_aneginfo *ap)
1948{
1949 unsigned long delta;
1950 u32 rx_cfg_reg;
1951 int ret;
1952
1953 if (ap->state == ANEG_STATE_UNKNOWN) {
1954 ap->rxconfig = 0;
1955 ap->link_time = 0;
1956 ap->cur_time = 0;
1957 ap->ability_match_cfg = 0;
1958 ap->ability_match_count = 0;
1959 ap->ability_match = 0;
1960 ap->idle_match = 0;
1961 ap->ack_match = 0;
1962 }
1963 ap->cur_time++;
1964
1965 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1966 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1967
1968 if (rx_cfg_reg != ap->ability_match_cfg) {
1969 ap->ability_match_cfg = rx_cfg_reg;
1970 ap->ability_match = 0;
1971 ap->ability_match_count = 0;
1972 } else {
1973 if (++ap->ability_match_count > 1) {
1974 ap->ability_match = 1;
1975 ap->ability_match_cfg = rx_cfg_reg;
1976 }
1977 }
1978 if (rx_cfg_reg & ANEG_CFG_ACK)
1979 ap->ack_match = 1;
1980 else
1981 ap->ack_match = 0;
1982
1983 ap->idle_match = 0;
1984 } else {
1985 ap->idle_match = 1;
1986 ap->ability_match_cfg = 0;
1987 ap->ability_match_count = 0;
1988 ap->ability_match = 0;
1989 ap->ack_match = 0;
1990
1991 rx_cfg_reg = 0;
1992 }
1993
1994 ap->rxconfig = rx_cfg_reg;
1995 ret = ANEG_OK;
1996
1997 switch(ap->state) {
1998 case ANEG_STATE_UNKNOWN:
1999 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2000 ap->state = ANEG_STATE_AN_ENABLE;
2001
2002 /* fallthru */
2003 case ANEG_STATE_AN_ENABLE:
2004 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2005 if (ap->flags & MR_AN_ENABLE) {
2006 ap->link_time = 0;
2007 ap->cur_time = 0;
2008 ap->ability_match_cfg = 0;
2009 ap->ability_match_count = 0;
2010 ap->ability_match = 0;
2011 ap->idle_match = 0;
2012 ap->ack_match = 0;
2013
2014 ap->state = ANEG_STATE_RESTART_INIT;
2015 } else {
2016 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2017 }
2018 break;
2019
2020 case ANEG_STATE_RESTART_INIT:
2021 ap->link_time = ap->cur_time;
2022 ap->flags &= ~(MR_NP_LOADED);
2023 ap->txconfig = 0;
2024 tw32(MAC_TX_AUTO_NEG, 0);
2025 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2026 tw32_f(MAC_MODE, tp->mac_mode);
2027 udelay(40);
2028
2029 ret = ANEG_TIMER_ENAB;
2030 ap->state = ANEG_STATE_RESTART;
2031
2032 /* fallthru */
2033 case ANEG_STATE_RESTART:
2034 delta = ap->cur_time - ap->link_time;
2035 if (delta > ANEG_STATE_SETTLE_TIME) {
2036 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2037 } else {
2038 ret = ANEG_TIMER_ENAB;
2039 }
2040 break;
2041
2042 case ANEG_STATE_DISABLE_LINK_OK:
2043 ret = ANEG_DONE;
2044 break;
2045
2046 case ANEG_STATE_ABILITY_DETECT_INIT:
2047 ap->flags &= ~(MR_TOGGLE_TX);
2048 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2049 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2050 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2051 tw32_f(MAC_MODE, tp->mac_mode);
2052 udelay(40);
2053
2054 ap->state = ANEG_STATE_ABILITY_DETECT;
2055 break;
2056
2057 case ANEG_STATE_ABILITY_DETECT:
2058 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2059 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2060 }
2061 break;
2062
2063 case ANEG_STATE_ACK_DETECT_INIT:
2064 ap->txconfig |= ANEG_CFG_ACK;
2065 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2066 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2067 tw32_f(MAC_MODE, tp->mac_mode);
2068 udelay(40);
2069
2070 ap->state = ANEG_STATE_ACK_DETECT;
2071
2072 /* fallthru */
2073 case ANEG_STATE_ACK_DETECT:
2074 if (ap->ack_match != 0) {
2075 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2076 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2077 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2078 } else {
2079 ap->state = ANEG_STATE_AN_ENABLE;
2080 }
2081 } else if (ap->ability_match != 0 &&
2082 ap->rxconfig == 0) {
2083 ap->state = ANEG_STATE_AN_ENABLE;
2084 }
2085 break;
2086
2087 case ANEG_STATE_COMPLETE_ACK_INIT:
2088 if (ap->rxconfig & ANEG_CFG_INVAL) {
2089 ret = ANEG_FAILED;
2090 break;
2091 }
2092 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2093 MR_LP_ADV_HALF_DUPLEX |
2094 MR_LP_ADV_SYM_PAUSE |
2095 MR_LP_ADV_ASYM_PAUSE |
2096 MR_LP_ADV_REMOTE_FAULT1 |
2097 MR_LP_ADV_REMOTE_FAULT2 |
2098 MR_LP_ADV_NEXT_PAGE |
2099 MR_TOGGLE_RX |
2100 MR_NP_RX);
2101 if (ap->rxconfig & ANEG_CFG_FD)
2102 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2103 if (ap->rxconfig & ANEG_CFG_HD)
2104 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2105 if (ap->rxconfig & ANEG_CFG_PS1)
2106 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2107 if (ap->rxconfig & ANEG_CFG_PS2)
2108 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2109 if (ap->rxconfig & ANEG_CFG_RF1)
2110 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2111 if (ap->rxconfig & ANEG_CFG_RF2)
2112 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2113 if (ap->rxconfig & ANEG_CFG_NP)
2114 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2115
2116 ap->link_time = ap->cur_time;
2117
2118 ap->flags ^= (MR_TOGGLE_TX);
2119 if (ap->rxconfig & 0x0008)
2120 ap->flags |= MR_TOGGLE_RX;
2121 if (ap->rxconfig & ANEG_CFG_NP)
2122 ap->flags |= MR_NP_RX;
2123 ap->flags |= MR_PAGE_RX;
2124
2125 ap->state = ANEG_STATE_COMPLETE_ACK;
2126 ret = ANEG_TIMER_ENAB;
2127 break;
2128
2129 case ANEG_STATE_COMPLETE_ACK:
2130 if (ap->ability_match != 0 &&
2131 ap->rxconfig == 0) {
2132 ap->state = ANEG_STATE_AN_ENABLE;
2133 break;
2134 }
2135 delta = ap->cur_time - ap->link_time;
2136 if (delta > ANEG_STATE_SETTLE_TIME) {
2137 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2138 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2139 } else {
2140 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2141 !(ap->flags & MR_NP_RX)) {
2142 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2143 } else {
2144 ret = ANEG_FAILED;
2145 }
2146 }
2147 }
2148 break;
2149
2150 case ANEG_STATE_IDLE_DETECT_INIT:
2151 ap->link_time = ap->cur_time;
2152 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2153 tw32_f(MAC_MODE, tp->mac_mode);
2154 udelay(40);
2155
2156 ap->state = ANEG_STATE_IDLE_DETECT;
2157 ret = ANEG_TIMER_ENAB;
2158 break;
2159
2160 case ANEG_STATE_IDLE_DETECT:
2161 if (ap->ability_match != 0 &&
2162 ap->rxconfig == 0) {
2163 ap->state = ANEG_STATE_AN_ENABLE;
2164 break;
2165 }
2166 delta = ap->cur_time - ap->link_time;
2167 if (delta > ANEG_STATE_SETTLE_TIME) {
2168 /* XXX another gem from the Broadcom driver :( */
2169 ap->state = ANEG_STATE_LINK_OK;
2170 }
2171 break;
2172
2173 case ANEG_STATE_LINK_OK:
2174 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2175 ret = ANEG_DONE;
2176 break;
2177
2178 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2179 /* ??? unimplemented */
2180 break;
2181
2182 case ANEG_STATE_NEXT_PAGE_WAIT:
2183 /* ??? unimplemented */
2184 break;
2185
2186 default:
2187 ret = ANEG_FAILED;
2188 break;
2189 };
2190
2191 return ret;
2192}
2193
2194static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2195{
2196 int res = 0;
2197 struct tg3_fiber_aneginfo aninfo;
2198 int status = ANEG_FAILED;
2199 unsigned int tick;
2200 u32 tmp;
2201
2202 tw32_f(MAC_TX_AUTO_NEG, 0);
2203
2204 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2205 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2206 udelay(40);
2207
2208 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2209 udelay(40);
2210
2211 memset(&aninfo, 0, sizeof(aninfo));
2212 aninfo.flags |= MR_AN_ENABLE;
2213 aninfo.state = ANEG_STATE_UNKNOWN;
2214 aninfo.cur_time = 0;
2215 tick = 0;
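	/* The state machine is stepped about once per microsecond, so the
	 * 195000-iteration loop below bounds the whole exchange to roughly
	 * 195 ms, while ANEG_STATE_SETTLE_TIME (10000 ticks of
	 * aninfo.cur_time, i.e. ~10 ms) is the per-state settle delay used
	 * inside the machine.
	 */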
2216 while (++tick < 195000) {
2217 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2218 if (status == ANEG_DONE || status == ANEG_FAILED)
2219 break;
2220
2221 udelay(1);
2222 }
2223
2224 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2225 tw32_f(MAC_MODE, tp->mac_mode);
2226 udelay(40);
2227
2228 *flags = aninfo.flags;
2229
2230 if (status == ANEG_DONE &&
2231 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2232 MR_LP_ADV_FULL_DUPLEX)))
2233 res = 1;
2234
2235 return res;
2236}
2237
2238static void tg3_init_bcm8002(struct tg3 *tp)
2239{
2240 u32 mac_status = tr32(MAC_STATUS);
2241 int i;
2242
2243 /* Reset when initting first time or we have a link. */
2244 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2245 !(mac_status & MAC_STATUS_PCS_SYNCED))
2246 return;
2247
2248 /* Set PLL lock range. */
2249 tg3_writephy(tp, 0x16, 0x8007);
2250
2251 /* SW reset */
2252 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2253
2254 /* Wait for reset to complete. */
2255 /* XXX schedule_timeout() ... */
2256 for (i = 0; i < 500; i++)
2257 udelay(10);
2258
2259 /* Config mode; select PMA/Ch 1 regs. */
2260 tg3_writephy(tp, 0x10, 0x8411);
2261
2262 /* Enable auto-lock and comdet, select txclk for tx. */
2263 tg3_writephy(tp, 0x11, 0x0a10);
2264
2265 tg3_writephy(tp, 0x18, 0x00a0);
2266 tg3_writephy(tp, 0x16, 0x41ff);
2267
2268 /* Assert and deassert POR. */
2269 tg3_writephy(tp, 0x13, 0x0400);
2270 udelay(40);
2271 tg3_writephy(tp, 0x13, 0x0000);
2272
2273 tg3_writephy(tp, 0x11, 0x0a50);
2274 udelay(40);
2275 tg3_writephy(tp, 0x11, 0x0a10);
2276
2277 /* Wait for signal to stabilize */
2278 /* XXX schedule_timeout() ... */
2279 for (i = 0; i < 15000; i++)
2280 udelay(10);
2281
2282 /* Deselect the channel register so we can read the PHYID
2283 * later.
2284 */
2285 tg3_writephy(tp, 0x10, 0x8011);
2286}
2287
2288static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2289{
2290 u32 sg_dig_ctrl, sg_dig_status;
2291 u32 serdes_cfg, expected_sg_dig_ctrl;
2292 int workaround, port_a;
2293 int current_link_up;
2294
2295 serdes_cfg = 0;
2296 expected_sg_dig_ctrl = 0;
2297 workaround = 0;
2298 port_a = 1;
2299 current_link_up = 0;
2300
2301 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2302 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2303 workaround = 1;
2304 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2305 port_a = 0;
2306
2307 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2308 /* preserve bits 20-23 for voltage regulator */
2309 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2310 }
2311
2312 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2313
2314 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2315 if (sg_dig_ctrl & (1 << 31)) {
2316 if (workaround) {
2317 u32 val = serdes_cfg;
2318
2319 if (port_a)
2320 val |= 0xc010000;
2321 else
2322 val |= 0x4010000;
2323 tw32_f(MAC_SERDES_CFG, val);
2324 }
2325 tw32_f(SG_DIG_CTRL, 0x01388400);
2326 }
2327 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2328 tg3_setup_flow_control(tp, 0, 0);
2329 current_link_up = 1;
2330 }
2331 goto out;
2332 }
2333
2334 /* Want auto-negotiation. */
2335 expected_sg_dig_ctrl = 0x81388400;
2336
2337 /* Pause capability */
2338 expected_sg_dig_ctrl |= (1 << 11);
2339
 2340	/* Asymmetric pause */
2341 expected_sg_dig_ctrl |= (1 << 12);
2342
2343 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2344 if (workaround)
2345 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2346 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2347 udelay(5);
2348 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2349
2350 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2351 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2352 MAC_STATUS_SIGNAL_DET)) {
2353 int i;
2354
 2355		/* Give time to negotiate (~200ms) */
2356 for (i = 0; i < 40000; i++) {
2357 sg_dig_status = tr32(SG_DIG_STATUS);
2358 if (sg_dig_status & (0x3))
2359 break;
2360 udelay(5);
2361 }
2362 mac_status = tr32(MAC_STATUS);
2363
2364 if ((sg_dig_status & (1 << 1)) &&
2365 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2366 u32 local_adv, remote_adv;
2367
2368 local_adv = ADVERTISE_PAUSE_CAP;
2369 remote_adv = 0;
2370 if (sg_dig_status & (1 << 19))
2371 remote_adv |= LPA_PAUSE_CAP;
2372 if (sg_dig_status & (1 << 20))
2373 remote_adv |= LPA_PAUSE_ASYM;
2374
2375 tg3_setup_flow_control(tp, local_adv, remote_adv);
2376 current_link_up = 1;
2377 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2378 } else if (!(sg_dig_status & (1 << 1))) {
2379 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2380 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2381 else {
2382 if (workaround) {
2383 u32 val = serdes_cfg;
2384
2385 if (port_a)
2386 val |= 0xc010000;
2387 else
2388 val |= 0x4010000;
2389
2390 tw32_f(MAC_SERDES_CFG, val);
2391 }
2392
2393 tw32_f(SG_DIG_CTRL, 0x01388400);
2394 udelay(40);
2395
2396 /* Link parallel detection - link is up */
2397 /* only if we have PCS_SYNC and not */
2398 /* receiving config code words */
2399 mac_status = tr32(MAC_STATUS);
2400 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2401 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2402 tg3_setup_flow_control(tp, 0, 0);
2403 current_link_up = 1;
2404 }
2405 }
2406 }
2407 }
2408
2409out:
2410 return current_link_up;
2411}
2412
2413static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2414{
2415 int current_link_up = 0;
2416
2417 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2418 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2419 goto out;
2420 }
2421
2422 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2423 u32 flags;
2424 int i;
2425
2426 if (fiber_autoneg(tp, &flags)) {
2427 u32 local_adv, remote_adv;
2428
2429 local_adv = ADVERTISE_PAUSE_CAP;
2430 remote_adv = 0;
2431 if (flags & MR_LP_ADV_SYM_PAUSE)
2432 remote_adv |= LPA_PAUSE_CAP;
2433 if (flags & MR_LP_ADV_ASYM_PAUSE)
2434 remote_adv |= LPA_PAUSE_ASYM;
2435
2436 tg3_setup_flow_control(tp, local_adv, remote_adv);
2437
2438 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2439 current_link_up = 1;
2440 }
2441 for (i = 0; i < 30; i++) {
2442 udelay(20);
2443 tw32_f(MAC_STATUS,
2444 (MAC_STATUS_SYNC_CHANGED |
2445 MAC_STATUS_CFG_CHANGED));
2446 udelay(40);
2447 if ((tr32(MAC_STATUS) &
2448 (MAC_STATUS_SYNC_CHANGED |
2449 MAC_STATUS_CFG_CHANGED)) == 0)
2450 break;
2451 }
2452
2453 mac_status = tr32(MAC_STATUS);
2454 if (current_link_up == 0 &&
2455 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2456 !(mac_status & MAC_STATUS_RCVD_CFG))
2457 current_link_up = 1;
2458 } else {
2459 /* Forcing 1000FD link up. */
2460 current_link_up = 1;
2461 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2462
2463 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2464 udelay(40);
2465 }
2466
2467out:
2468 return current_link_up;
2469}
2470
2471static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2472{
2473 u32 orig_pause_cfg;
2474 u16 orig_active_speed;
2475 u8 orig_active_duplex;
2476 u32 mac_status;
2477 int current_link_up;
2478 int i;
2479
2480 orig_pause_cfg =
2481 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2482 TG3_FLAG_TX_PAUSE));
2483 orig_active_speed = tp->link_config.active_speed;
2484 orig_active_duplex = tp->link_config.active_duplex;
2485
2486 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2487 netif_carrier_ok(tp->dev) &&
2488 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2489 mac_status = tr32(MAC_STATUS);
2490 mac_status &= (MAC_STATUS_PCS_SYNCED |
2491 MAC_STATUS_SIGNAL_DET |
2492 MAC_STATUS_CFG_CHANGED |
2493 MAC_STATUS_RCVD_CFG);
2494 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2495 MAC_STATUS_SIGNAL_DET)) {
2496 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2497 MAC_STATUS_CFG_CHANGED));
2498 return 0;
2499 }
2500 }
2501
2502 tw32_f(MAC_TX_AUTO_NEG, 0);
2503
2504 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2505 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2506 tw32_f(MAC_MODE, tp->mac_mode);
2507 udelay(40);
2508
2509 if (tp->phy_id == PHY_ID_BCM8002)
2510 tg3_init_bcm8002(tp);
2511
2512 /* Enable link change event even when serdes polling. */
2513 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2514 udelay(40);
2515
2516 current_link_up = 0;
2517 mac_status = tr32(MAC_STATUS);
2518
2519 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2520 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2521 else
2522 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2523
2524 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2525 tw32_f(MAC_MODE, tp->mac_mode);
2526 udelay(40);
2527
2528 tp->hw_status->status =
2529 (SD_STATUS_UPDATED |
2530 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2531
2532 for (i = 0; i < 100; i++) {
2533 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2534 MAC_STATUS_CFG_CHANGED));
2535 udelay(5);
2536 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2537 MAC_STATUS_CFG_CHANGED)) == 0)
2538 break;
2539 }
2540
2541 mac_status = tr32(MAC_STATUS);
2542 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2543 current_link_up = 0;
2544 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2545 tw32_f(MAC_MODE, (tp->mac_mode |
2546 MAC_MODE_SEND_CONFIGS));
2547 udelay(1);
2548 tw32_f(MAC_MODE, tp->mac_mode);
2549 }
2550 }
2551
2552 if (current_link_up == 1) {
2553 tp->link_config.active_speed = SPEED_1000;
2554 tp->link_config.active_duplex = DUPLEX_FULL;
2555 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2556 LED_CTRL_LNKLED_OVERRIDE |
2557 LED_CTRL_1000MBPS_ON));
2558 } else {
2559 tp->link_config.active_speed = SPEED_INVALID;
2560 tp->link_config.active_duplex = DUPLEX_INVALID;
2561 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2562 LED_CTRL_LNKLED_OVERRIDE |
2563 LED_CTRL_TRAFFIC_OVERRIDE));
2564 }
2565
2566 if (current_link_up != netif_carrier_ok(tp->dev)) {
2567 if (current_link_up)
2568 netif_carrier_on(tp->dev);
2569 else
2570 netif_carrier_off(tp->dev);
2571 tg3_link_report(tp);
2572 } else {
2573 u32 now_pause_cfg =
2574 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2575 TG3_FLAG_TX_PAUSE);
2576 if (orig_pause_cfg != now_pause_cfg ||
2577 orig_active_speed != tp->link_config.active_speed ||
2578 orig_active_duplex != tp->link_config.active_duplex)
2579 tg3_link_report(tp);
2580 }
2581
2582 return 0;
2583}
2584
Michael Chan747e8f82005-07-25 12:33:22 -07002585static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2586{
2587 int current_link_up, err = 0;
2588 u32 bmsr, bmcr;
2589 u16 current_speed;
2590 u8 current_duplex;
2591
2592 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2593 tw32_f(MAC_MODE, tp->mac_mode);
2594 udelay(40);
2595
2596 tw32(MAC_EVENT, 0);
2597
2598 tw32_f(MAC_STATUS,
2599 (MAC_STATUS_SYNC_CHANGED |
2600 MAC_STATUS_CFG_CHANGED |
2601 MAC_STATUS_MI_COMPLETION |
2602 MAC_STATUS_LNKSTATE_CHANGED));
2603 udelay(40);
2604
2605 if (force_reset)
2606 tg3_phy_reset(tp);
2607
2608 current_link_up = 0;
2609 current_speed = SPEED_INVALID;
2610 current_duplex = DUPLEX_INVALID;
2611
2612 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2613 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2614
2615 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2616
2617 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2618 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2619 /* do nothing, just check for link up at the end */
2620 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2621 u32 adv, new_adv;
2622
2623 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2624 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2625 ADVERTISE_1000XPAUSE |
2626 ADVERTISE_1000XPSE_ASYM |
2627 ADVERTISE_SLCT);
2628
2629 /* Always advertise symmetric PAUSE just like copper */
2630 new_adv |= ADVERTISE_1000XPAUSE;
2631
2632 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2633 new_adv |= ADVERTISE_1000XHALF;
2634 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2635 new_adv |= ADVERTISE_1000XFULL;
2636
2637 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2638 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2639 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2640 tg3_writephy(tp, MII_BMCR, bmcr);
2641
2642 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2643 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2644 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2645
2646 return err;
2647 }
2648 } else {
2649 u32 new_bmcr;
2650
2651 bmcr &= ~BMCR_SPEED1000;
2652 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2653
2654 if (tp->link_config.duplex == DUPLEX_FULL)
2655 new_bmcr |= BMCR_FULLDPLX;
2656
2657 if (new_bmcr != bmcr) {
2658 /* BMCR_SPEED1000 is a reserved bit that needs
2659 * to be set on write.
2660 */
2661 new_bmcr |= BMCR_SPEED1000;
2662
2663 /* Force a linkdown */
2664 if (netif_carrier_ok(tp->dev)) {
2665 u32 adv;
2666
2667 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2668 adv &= ~(ADVERTISE_1000XFULL |
2669 ADVERTISE_1000XHALF |
2670 ADVERTISE_SLCT);
2671 tg3_writephy(tp, MII_ADVERTISE, adv);
2672 tg3_writephy(tp, MII_BMCR, bmcr |
2673 BMCR_ANRESTART |
2674 BMCR_ANENABLE);
2675 udelay(10);
2676 netif_carrier_off(tp->dev);
2677 }
2678 tg3_writephy(tp, MII_BMCR, new_bmcr);
2679 bmcr = new_bmcr;
2680 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2681 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2682 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2683 }
2684 }
2685
2686 if (bmsr & BMSR_LSTATUS) {
2687 current_speed = SPEED_1000;
2688 current_link_up = 1;
2689 if (bmcr & BMCR_FULLDPLX)
2690 current_duplex = DUPLEX_FULL;
2691 else
2692 current_duplex = DUPLEX_HALF;
2693
2694 if (bmcr & BMCR_ANENABLE) {
2695 u32 local_adv, remote_adv, common;
2696
2697 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2698 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2699 common = local_adv & remote_adv;
2700 if (common & (ADVERTISE_1000XHALF |
2701 ADVERTISE_1000XFULL)) {
2702 if (common & ADVERTISE_1000XFULL)
2703 current_duplex = DUPLEX_FULL;
2704 else
2705 current_duplex = DUPLEX_HALF;
2706
2707 tg3_setup_flow_control(tp, local_adv,
2708 remote_adv);
2709 }
2710 else
2711 current_link_up = 0;
2712 }
2713 }
2714
2715 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2716 if (tp->link_config.active_duplex == DUPLEX_HALF)
2717 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2718
2719 tw32_f(MAC_MODE, tp->mac_mode);
2720 udelay(40);
2721
2722 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2723
2724 tp->link_config.active_speed = current_speed;
2725 tp->link_config.active_duplex = current_duplex;
2726
2727 if (current_link_up != netif_carrier_ok(tp->dev)) {
2728 if (current_link_up)
2729 netif_carrier_on(tp->dev);
2730 else {
2731 netif_carrier_off(tp->dev);
2732 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2733 }
2734 tg3_link_report(tp);
2735 }
2736 return err;
2737}
2738
2739static void tg3_serdes_parallel_detect(struct tg3 *tp)
2740{
2741 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2742 /* Give autoneg time to complete. */
2743 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2744 return;
2745 }
2746 if (!netif_carrier_ok(tp->dev) &&
2747 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2748 u32 bmcr;
2749
2750 tg3_readphy(tp, MII_BMCR, &bmcr);
2751 if (bmcr & BMCR_ANENABLE) {
2752 u32 phy1, phy2;
2753
2754 /* Select shadow register 0x1f */
2755 tg3_writephy(tp, 0x1c, 0x7c00);
2756 tg3_readphy(tp, 0x1c, &phy1);
2757
2758 /* Select expansion interrupt status register */
2759 tg3_writephy(tp, 0x17, 0x0f01);
2760 tg3_readphy(tp, 0x15, &phy2);
2761 tg3_readphy(tp, 0x15, &phy2);
2762
2763 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
 2764				/* We have signal detect and are not receiving
 2765				 * config code words; the link is up by parallel
 2766				 * detection.
2767 */
2768
2769 bmcr &= ~BMCR_ANENABLE;
2770 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2771 tg3_writephy(tp, MII_BMCR, bmcr);
2772 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2773 }
2774 }
2775 }
2776 else if (netif_carrier_ok(tp->dev) &&
2777 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2778 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2779 u32 phy2;
2780
2781 /* Select expansion interrupt status register */
2782 tg3_writephy(tp, 0x17, 0x0f01);
2783 tg3_readphy(tp, 0x15, &phy2);
2784 if (phy2 & 0x20) {
2785 u32 bmcr;
2786
2787 /* Config code words received, turn on autoneg. */
2788 tg3_readphy(tp, MII_BMCR, &bmcr);
2789 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2790
2791 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2792
2793 }
2794 }
2795}
2796
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2798{
2799 int err;
2800
2801 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2802 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07002803 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2804 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 } else {
2806 err = tg3_setup_copper_phy(tp, force_reset);
2807 }
2808
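	/* Half-duplex gigabit needs a much larger slot time than 10/100 or
	 * full duplex, presumably to cover 802.3 carrier extension (4096 bit
	 * times instead of 512); hence the 0xff vs. 32 slot-time values in
	 * the two TX_LENGTHS settings below.
	 */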
2809 if (tp->link_config.active_speed == SPEED_1000 &&
2810 tp->link_config.active_duplex == DUPLEX_HALF)
2811 tw32(MAC_TX_LENGTHS,
2812 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2813 (6 << TX_LENGTHS_IPG_SHIFT) |
2814 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2815 else
2816 tw32(MAC_TX_LENGTHS,
2817 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2818 (6 << TX_LENGTHS_IPG_SHIFT) |
2819 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2820
2821 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2822 if (netif_carrier_ok(tp->dev)) {
2823 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07002824 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 } else {
2826 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2827 }
2828 }
2829
2830 return err;
2831}
2832
2833/* Tigon3 never reports partial packet sends. So we do not
2834 * need special logic to handle SKBs that have not had all
2835 * of their frags sent yet, like SunGEM does.
2836 */
2837static void tg3_tx(struct tg3 *tp)
2838{
2839 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2840 u32 sw_idx = tp->tx_cons;
2841
2842 while (sw_idx != hw_idx) {
2843 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2844 struct sk_buff *skb = ri->skb;
2845 int i;
2846
2847 if (unlikely(skb == NULL))
2848 BUG();
2849
2850 pci_unmap_single(tp->pdev,
2851 pci_unmap_addr(ri, mapping),
2852 skb_headlen(skb),
2853 PCI_DMA_TODEVICE);
2854
2855 ri->skb = NULL;
2856
2857 sw_idx = NEXT_TX(sw_idx);
2858
2859 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2860 if (unlikely(sw_idx == hw_idx))
2861 BUG();
2862
2863 ri = &tp->tx_buffers[sw_idx];
2864 if (unlikely(ri->skb != NULL))
2865 BUG();
2866
2867 pci_unmap_page(tp->pdev,
2868 pci_unmap_addr(ri, mapping),
2869 skb_shinfo(skb)->frags[i].size,
2870 PCI_DMA_TODEVICE);
2871
2872 sw_idx = NEXT_TX(sw_idx);
2873 }
2874
David S. Millerf47c11e2005-06-24 20:18:35 -07002875 dev_kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876 }
2877
2878 tp->tx_cons = sw_idx;
2879
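	/* Only take tx_lock when the queue is already stopped, keeping the
	 * common completion path lock-free; the stopped state is re-checked
	 * under the lock so a concurrent tg3_start_xmit() stopping the queue
	 * cannot be missed.
	 */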
Michael Chan51b91462005-09-01 17:41:28 -07002880 if (unlikely(netif_queue_stopped(tp->dev))) {
2881 spin_lock(&tp->tx_lock);
2882 if (netif_queue_stopped(tp->dev) &&
2883 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2884 netif_wake_queue(tp->dev);
2885 spin_unlock(&tp->tx_lock);
2886 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887}
2888
2889/* Returns size of skb allocated or < 0 on error.
2890 *
2891 * We only need to fill in the address because the other members
 2892 * of the RX descriptor are invariant; see tg3_init_rings().
 2893 *
 2894 * Note the purposeful asymmetry of cpu vs. chip accesses. For
2895 * posting buffers we only dirty the first cache line of the RX
2896 * descriptor (containing the address). Whereas for the RX status
2897 * buffers the cpu only reads the last cacheline of the RX descriptor
2898 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2899 */
2900static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2901 int src_idx, u32 dest_idx_unmasked)
2902{
2903 struct tg3_rx_buffer_desc *desc;
2904 struct ring_info *map, *src_map;
2905 struct sk_buff *skb;
2906 dma_addr_t mapping;
2907 int skb_size, dest_idx;
2908
2909 src_map = NULL;
2910 switch (opaque_key) {
2911 case RXD_OPAQUE_RING_STD:
2912 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2913 desc = &tp->rx_std[dest_idx];
2914 map = &tp->rx_std_buffers[dest_idx];
2915 if (src_idx >= 0)
2916 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07002917 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 break;
2919
2920 case RXD_OPAQUE_RING_JUMBO:
2921 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2922 desc = &tp->rx_jumbo[dest_idx];
2923 map = &tp->rx_jumbo_buffers[dest_idx];
2924 if (src_idx >= 0)
2925 src_map = &tp->rx_jumbo_buffers[src_idx];
2926 skb_size = RX_JUMBO_PKT_BUF_SZ;
2927 break;
2928
2929 default:
2930 return -EINVAL;
2931 };
2932
2933 /* Do not overwrite any of the map or rp information
2934 * until we are sure we can commit to a new buffer.
2935 *
2936 * Callers depend upon this behavior and assume that
2937 * we leave everything unchanged if we fail.
2938 */
2939 skb = dev_alloc_skb(skb_size);
2940 if (skb == NULL)
2941 return -ENOMEM;
2942
2943 skb->dev = tp->dev;
2944 skb_reserve(skb, tp->rx_offset);
2945
2946 mapping = pci_map_single(tp->pdev, skb->data,
2947 skb_size - tp->rx_offset,
2948 PCI_DMA_FROMDEVICE);
2949
2950 map->skb = skb;
2951 pci_unmap_addr_set(map, mapping, mapping);
2952
2953 if (src_map != NULL)
2954 src_map->skb = NULL;
2955
2956 desc->addr_hi = ((u64)mapping >> 32);
2957 desc->addr_lo = ((u64)mapping & 0xffffffff);
2958
2959 return skb_size;
2960}
2961
 2962/* We only need to move the address over because the other
2963 * members of the RX descriptor are invariant. See notes above
2964 * tg3_alloc_rx_skb for full details.
2965 */
2966static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2967 int src_idx, u32 dest_idx_unmasked)
2968{
2969 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2970 struct ring_info *src_map, *dest_map;
2971 int dest_idx;
2972
2973 switch (opaque_key) {
2974 case RXD_OPAQUE_RING_STD:
2975 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2976 dest_desc = &tp->rx_std[dest_idx];
2977 dest_map = &tp->rx_std_buffers[dest_idx];
2978 src_desc = &tp->rx_std[src_idx];
2979 src_map = &tp->rx_std_buffers[src_idx];
2980 break;
2981
2982 case RXD_OPAQUE_RING_JUMBO:
2983 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2984 dest_desc = &tp->rx_jumbo[dest_idx];
2985 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2986 src_desc = &tp->rx_jumbo[src_idx];
2987 src_map = &tp->rx_jumbo_buffers[src_idx];
2988 break;
2989
2990 default:
2991 return;
2992 };
2993
2994 dest_map->skb = src_map->skb;
2995 pci_unmap_addr_set(dest_map, mapping,
2996 pci_unmap_addr(src_map, mapping));
2997 dest_desc->addr_hi = src_desc->addr_hi;
2998 dest_desc->addr_lo = src_desc->addr_lo;
2999
3000 src_map->skb = NULL;
3001}
3002
3003#if TG3_VLAN_TAG_USED
3004static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3005{
3006 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3007}
3008#endif
3009
3010/* The RX ring scheme is composed of multiple rings which post fresh
3011 * buffers to the chip, and one special ring the chip uses to report
3012 * status back to the host.
3013 *
3014 * The special ring reports the status of received packets to the
3015 * host. The chip does not write into the original descriptor the
3016 * RX buffer was obtained from. The chip simply takes the original
3017 * descriptor as provided by the host, updates the status and length
3018 * field, then writes this into the next status ring entry.
3019 *
3020 * Each ring the host uses to post buffers to the chip is described
 3021 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
3022 * it is first placed into the on-chip ram. When the packet's length
3023 * is known, it walks down the TG3_BDINFO entries to select the ring.
 3024 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
 3025 * whose MAXLEN covers the new packet's length is chosen.
3026 *
3027 * The "separate ring for rx status" scheme may sound queer, but it makes
3028 * sense from a cache coherency perspective. If only the host writes
3029 * to the buffer post rings, and only the chip writes to the rx status
3030 * rings, then cache lines never move beyond shared-modified state.
3031 * If both the host and chip were to write into the same ring, cache line
3032 * eviction could occur since both entities want it in an exclusive state.
3033 */
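/* Concretely, tg3_rx() below recovers the producer ring and buffer index
 * from desc->opaque (RXD_OPAQUE_RING_* and RXD_OPAQUE_INDEX_MASK), then
 * either hands the original skb up and posts a fresh buffer with
 * tg3_alloc_rx_skb(), or, for packets at or below RX_COPY_THRESHOLD,
 * copies the data into a small skb and recycles the original buffer via
 * tg3_recycle_rx().
 */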
3034static int tg3_rx(struct tg3 *tp, int budget)
3035{
3036 u32 work_mask;
Michael Chan483ba502005-04-25 15:14:03 -07003037 u32 sw_idx = tp->rx_rcb_ptr;
3038 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 int received;
3040
3041 hw_idx = tp->hw_status->idx[0].rx_producer;
3042 /*
3043 * We need to order the read of hw_idx and the read of
3044 * the opaque cookie.
3045 */
3046 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 work_mask = 0;
3048 received = 0;
3049 while (sw_idx != hw_idx && budget > 0) {
3050 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3051 unsigned int len;
3052 struct sk_buff *skb;
3053 dma_addr_t dma_addr;
3054 u32 opaque_key, desc_idx, *post_ptr;
3055
3056 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3057 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3058 if (opaque_key == RXD_OPAQUE_RING_STD) {
3059 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3060 mapping);
3061 skb = tp->rx_std_buffers[desc_idx].skb;
3062 post_ptr = &tp->rx_std_ptr;
3063 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3064 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3065 mapping);
3066 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3067 post_ptr = &tp->rx_jumbo_ptr;
3068 }
3069 else {
3070 goto next_pkt_nopost;
3071 }
3072
3073 work_mask |= opaque_key;
3074
3075 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3076 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3077 drop_it:
3078 tg3_recycle_rx(tp, opaque_key,
3079 desc_idx, *post_ptr);
3080 drop_it_no_recycle:
3081 /* Other statistics kept track of by card. */
3082 tp->net_stats.rx_dropped++;
3083 goto next_pkt;
3084 }
3085
3086 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3087
3088 if (len > RX_COPY_THRESHOLD
3089 && tp->rx_offset == 2
3090 /* rx_offset != 2 iff this is a 5701 card running
3091 * in PCI-X mode [see tg3_get_invariants()] */
3092 ) {
3093 int skb_size;
3094
3095 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3096 desc_idx, *post_ptr);
3097 if (skb_size < 0)
3098 goto drop_it;
3099
3100 pci_unmap_single(tp->pdev, dma_addr,
3101 skb_size - tp->rx_offset,
3102 PCI_DMA_FROMDEVICE);
3103
3104 skb_put(skb, len);
3105 } else {
3106 struct sk_buff *copy_skb;
3107
3108 tg3_recycle_rx(tp, opaque_key,
3109 desc_idx, *post_ptr);
3110
3111 copy_skb = dev_alloc_skb(len + 2);
3112 if (copy_skb == NULL)
3113 goto drop_it_no_recycle;
3114
3115 copy_skb->dev = tp->dev;
3116 skb_reserve(copy_skb, 2);
3117 skb_put(copy_skb, len);
3118 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3119 memcpy(copy_skb->data, skb->data, len);
3120 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3121
3122 /* We'll reuse the original ring buffer. */
3123 skb = copy_skb;
3124 }
3125
3126 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3127 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3128 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3129 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3130 skb->ip_summed = CHECKSUM_UNNECESSARY;
3131 else
3132 skb->ip_summed = CHECKSUM_NONE;
3133
3134 skb->protocol = eth_type_trans(skb, tp->dev);
3135#if TG3_VLAN_TAG_USED
3136 if (tp->vlgrp != NULL &&
3137 desc->type_flags & RXD_FLAG_VLAN) {
3138 tg3_vlan_rx(tp, skb,
3139 desc->err_vlan & RXD_VLAN_MASK);
3140 } else
3141#endif
3142 netif_receive_skb(skb);
3143
3144 tp->dev->last_rx = jiffies;
3145 received++;
3146 budget--;
3147
3148next_pkt:
3149 (*post_ptr)++;
3150next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07003151 sw_idx++;
3152 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
Michael Chan52f6d692005-04-25 15:14:32 -07003153
3154 /* Refresh hw_idx to see if there is new work */
3155 if (sw_idx == hw_idx) {
3156 hw_idx = tp->hw_status->idx[0].rx_producer;
3157 rmb();
3158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 }
3160
3161 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07003162 tp->rx_rcb_ptr = sw_idx;
3163 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164
3165 /* Refill RX ring(s). */
3166 if (work_mask & RXD_OPAQUE_RING_STD) {
3167 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3168 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3169 sw_idx);
3170 }
3171 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3172 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3173 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3174 sw_idx);
3175 }
3176 mmiowb();
3177
3178 return received;
3179}
3180
3181static int tg3_poll(struct net_device *netdev, int *budget)
3182{
3183 struct tg3 *tp = netdev_priv(netdev);
3184 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185 int done;
3186
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187 /* handle link change and other phy events */
3188 if (!(tp->tg3_flags &
3189 (TG3_FLAG_USE_LINKCHG_REG |
3190 TG3_FLAG_POLL_SERDES))) {
3191 if (sblk->status & SD_STATUS_LINK_CHG) {
3192 sblk->status = SD_STATUS_UPDATED |
3193 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07003194 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07003196 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 }
3198 }
3199
3200 /* run TX completion thread */
3201 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202 tg3_tx(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 }
3204
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205 /* run RX thread, within the bounds set by NAPI.
3206 * All RX "locking" is done by ensuring outside
3207 * code synchronizes with dev->poll()
3208 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3210 int orig_budget = *budget;
3211 int work_done;
3212
3213 if (orig_budget > netdev->quota)
3214 orig_budget = netdev->quota;
3215
3216 work_done = tg3_rx(tp, orig_budget);
3217
3218 *budget -= work_done;
3219 netdev->quota -= work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 }
3221
David S. Millerf7383c22005-05-18 22:50:53 -07003222 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3223 tp->last_tag = sblk->status_tag;
3224 rmb();
David S. Millercd024c82005-06-24 20:17:10 -07003225 sblk->status &= ~SD_STATUS_UPDATED;
David S. Millerf7383c22005-05-18 22:50:53 -07003226
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227 /* if no more work, tell net stack and NIC we're done */
David S. Millerf7383c22005-05-18 22:50:53 -07003228 done = !tg3_has_work(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 if (done) {
David S. Millerf47c11e2005-06-24 20:18:35 -07003230 spin_lock(&tp->lock);
3231 netif_rx_complete(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232 tg3_restart_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07003233 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 }
3235
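	/* With this dev->poll() interface, returning 0 means we are done and
	 * have already been removed from the poll list via
	 * netif_rx_complete(); returning 1 asks the stack to poll us again.
	 */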
3236 return (done ? 0 : 1);
3237}
3238
David S. Millerf47c11e2005-06-24 20:18:35 -07003239static void tg3_irq_quiesce(struct tg3 *tp)
3240{
3241 BUG_ON(tp->irq_sync);
3242
3243 tp->irq_sync = 1;
3244 smp_mb();
3245
3246 synchronize_irq(tp->pdev->irq);
3247}
3248
3249static inline int tg3_irq_sync(struct tg3 *tp)
3250{
3251 return tp->irq_sync;
3252}
3253
 3254/* Fully shut down all tg3 driver activity elsewhere in the system.
 3255 * If irq_sync is non-zero, the interrupt handler is quiesced and
 3256 * synchronized with as well. Most of the time this is only necessary
 3257 * when shutting down the device.
3258 */
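/* Lock ordering is tp->lock (taken with BHs disabled) followed by
 * tp->tx_lock.  When irq_sync is requested, tg3_irq_quiesce() sets
 * tp->irq_sync and waits out any running handler, after which the
 * interrupt handlers bail out early via tg3_irq_sync().
 */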
3259static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3260{
3261 if (irq_sync)
3262 tg3_irq_quiesce(tp);
3263 spin_lock_bh(&tp->lock);
3264 spin_lock(&tp->tx_lock);
3265}
3266
3267static inline void tg3_full_unlock(struct tg3 *tp)
3268{
3269 spin_unlock(&tp->tx_lock);
3270 spin_unlock_bh(&tp->lock);
3271}
3272
Michael Chan88b06bc2005-04-21 17:13:25 -07003273/* MSI ISR - No need to check for interrupt sharing and no need to
3274 * flush status block and interrupt mailbox. PCI ordering rules
3275 * guarantee that MSI will arrive after the status block.
3276 */
3277static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3278{
3279 struct net_device *dev = dev_id;
3280 struct tg3 *tp = netdev_priv(dev);
3281 struct tg3_hw_status *sblk = tp->hw_status;
Michael Chan88b06bc2005-04-21 17:13:25 -07003282
3283 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003284 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07003285 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07003286	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07003287 * NIC to stop sending us irqs, engaging "in-intr-handler"
3288 * event coalescing.
3289 */
3290 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
David S. Millerfac9b832005-05-18 22:46:34 -07003291 tp->last_tag = sblk->status_tag;
David S. Millercd024c82005-06-24 20:17:10 -07003292 rmb();
David S. Millerf47c11e2005-06-24 20:18:35 -07003293 if (tg3_irq_sync(tp))
3294 goto out;
Michael Chan88b06bc2005-04-21 17:13:25 -07003295 sblk->status &= ~SD_STATUS_UPDATED;
Michael Chan04237dd2005-04-25 15:17:17 -07003296 if (likely(tg3_has_work(tp)))
Michael Chan88b06bc2005-04-21 17:13:25 -07003297 netif_rx_schedule(dev); /* schedule NAPI poll */
3298 else {
David S. Millerfac9b832005-05-18 22:46:34 -07003299 /* No work, re-enable interrupts. */
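		/* The last seen status tag is written back in bits 31:24 of
		 * the mailbox; this is assumed to let the NIC compare it with
		 * the current tag and re-interrupt immediately if newer
		 * events have already been posted.
		 */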
Michael Chan88b06bc2005-04-21 17:13:25 -07003300 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
David S. Millerfac9b832005-05-18 22:46:34 -07003301 tp->last_tag << 24);
Michael Chan88b06bc2005-04-21 17:13:25 -07003302 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003303out:
Michael Chan88b06bc2005-04-21 17:13:25 -07003304 return IRQ_RETVAL(1);
3305}
3306
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3308{
3309 struct net_device *dev = dev_id;
3310 struct tg3 *tp = netdev_priv(dev);
3311 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003312 unsigned int handled = 1;
3313
Linus Torvalds1da177e2005-04-16 15:20:36 -07003314 /* In INTx mode, it is possible for the interrupt to arrive at
3315 * the CPU before the status block posted prior to the interrupt.
3316 * Reading the PCI State register will confirm whether the
3317 * interrupt is ours and will flush the status block.
3318 */
3319 if ((sblk->status & SD_STATUS_UPDATED) ||
3320 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3321 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003322 * Writing any value to intr-mbox-0 clears PCI INTA# and
3323 * chip-internal interrupt pending events.
 3324		 * Writing non-zero to intr-mbox-0 additionally tells the
3325 * NIC to stop sending us irqs, engaging "in-intr-handler"
3326 * event coalescing.
3327 */
3328 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3329 0x00000001);
David S. Millerf47c11e2005-06-24 20:18:35 -07003330 if (tg3_irq_sync(tp))
3331 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07003332 sblk->status &= ~SD_STATUS_UPDATED;
3333 if (likely(tg3_has_work(tp)))
3334 netif_rx_schedule(dev); /* schedule NAPI poll */
3335 else {
3336 /* No work, shared interrupt perhaps? re-enable
3337 * interrupts, and flush that PCI write
3338 */
Michael Chan09ee9292005-08-09 20:17:00 -07003339 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
David S. Millerfac9b832005-05-18 22:46:34 -07003340 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07003341 }
3342 } else { /* shared interrupt */
3343 handled = 0;
3344 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003345out:
David S. Millerfac9b832005-05-18 22:46:34 -07003346 return IRQ_RETVAL(handled);
3347}
3348
3349static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3350{
3351 struct net_device *dev = dev_id;
3352 struct tg3 *tp = netdev_priv(dev);
3353 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07003354 unsigned int handled = 1;
3355
David S. Millerfac9b832005-05-18 22:46:34 -07003356 /* In INTx mode, it is possible for the interrupt to arrive at
3357 * the CPU before the status block posted prior to the interrupt.
3358 * Reading the PCI State register will confirm whether the
3359 * interrupt is ours and will flush the status block.
3360 */
3361 if ((sblk->status & SD_STATUS_UPDATED) ||
3362 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3363 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 * writing any value to intr-mbox-0 clears PCI INTA# and
3365 * chip-internal interrupt pending events.
 3366		 * writing non-zero to intr-mbox-0 additionally tells the
3367 * NIC to stop sending us irqs, engaging "in-intr-handler"
3368 * event coalescing.
3369 */
3370 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3371 0x00000001);
David S. Millerfac9b832005-05-18 22:46:34 -07003372 tp->last_tag = sblk->status_tag;
David S. Millercd024c82005-06-24 20:17:10 -07003373 rmb();
David S. Millerf47c11e2005-06-24 20:18:35 -07003374 if (tg3_irq_sync(tp))
3375 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 sblk->status &= ~SD_STATUS_UPDATED;
Michael Chan04237dd2005-04-25 15:17:17 -07003377 if (likely(tg3_has_work(tp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 netif_rx_schedule(dev); /* schedule NAPI poll */
3379 else {
3380 /* no work, shared interrupt perhaps? re-enable
3381 * interrupts, and flush that PCI write
3382 */
Michael Chan09ee9292005-08-09 20:17:00 -07003383 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3384 tp->last_tag << 24);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 }
3386 } else { /* shared interrupt */
3387 handled = 0;
3388 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003389out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390 return IRQ_RETVAL(handled);
3391}
3392
Michael Chan79381092005-04-21 17:13:59 -07003393/* ISR for interrupt test */
3394static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3395 struct pt_regs *regs)
3396{
3397 struct net_device *dev = dev_id;
3398 struct tg3 *tp = netdev_priv(dev);
3399 struct tg3_hw_status *sblk = tp->hw_status;
3400
3401 if (sblk->status & SD_STATUS_UPDATED) {
3402 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3403 0x00000001);
3404 return IRQ_RETVAL(1);
3405 }
3406 return IRQ_RETVAL(0);
3407}
3408
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409static int tg3_init_hw(struct tg3 *);
Michael Chan944d9802005-05-29 14:57:48 -07003410static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411
3412#ifdef CONFIG_NET_POLL_CONTROLLER
3413static void tg3_poll_controller(struct net_device *dev)
3414{
Michael Chan88b06bc2005-04-21 17:13:25 -07003415 struct tg3 *tp = netdev_priv(dev);
3416
3417 tg3_interrupt(tp->pdev->irq, dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418}
3419#endif
3420
3421static void tg3_reset_task(void *_data)
3422{
3423 struct tg3 *tp = _data;
3424 unsigned int restart_timer;
3425
3426 tg3_netif_stop(tp);
3427
David S. Millerf47c11e2005-06-24 20:18:35 -07003428 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429
3430 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3431 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3432
Michael Chan944d9802005-05-29 14:57:48 -07003433 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434 tg3_init_hw(tp);
3435
3436 tg3_netif_start(tp);
3437
David S. Millerf47c11e2005-06-24 20:18:35 -07003438 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439
3440 if (restart_timer)
3441 mod_timer(&tp->timer, jiffies + 1);
3442}
3443
3444static void tg3_tx_timeout(struct net_device *dev)
3445{
3446 struct tg3 *tp = netdev_priv(dev);
3447
3448 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3449 dev->name);
3450
3451 schedule_work(&tp->reset_task);
3452}
3453
3454static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3455
3456static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3457 u32 guilty_entry, int guilty_len,
3458 u32 last_plus_one, u32 *start, u32 mss)
3459{
3460 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3461 dma_addr_t new_addr;
3462 u32 entry = *start;
3463 int i;
3464
3465 if (!new_skb) {
3466 dev_kfree_skb(skb);
3467 return -1;
3468 }
3469
3470 /* New SKB is guaranteed to be linear. */
3471 entry = *start;
3472 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3473 PCI_DMA_TODEVICE);
3474 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3475 (skb->ip_summed == CHECKSUM_HW) ?
3476 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3477 *start = NEXT_TX(entry);
3478
3479 /* Now clean up the sw ring entries. */
3480 i = 0;
3481 while (entry != last_plus_one) {
3482 int len;
3483
3484 if (i == 0)
3485 len = skb_headlen(skb);
3486 else
3487 len = skb_shinfo(skb)->frags[i-1].size;
3488 pci_unmap_single(tp->pdev,
3489 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3490 len, PCI_DMA_TODEVICE);
3491 if (i == 0) {
3492 tp->tx_buffers[entry].skb = new_skb;
3493 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3494 } else {
3495 tp->tx_buffers[entry].skb = NULL;
3496 }
3497 entry = NEXT_TX(entry);
3498 i++;
3499 }
3500
3501 dev_kfree_skb(skb);
3502
3503 return 0;
3504}
3505
3506static void tg3_set_txd(struct tg3 *tp, int entry,
3507 dma_addr_t mapping, int len, u32 flags,
3508 u32 mss_and_is_end)
3509{
3510 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3511 int is_end = (mss_and_is_end & 0x1);
3512 u32 mss = (mss_and_is_end >> 1);
3513 u32 vlan_tag = 0;
3514
3515 if (is_end)
3516 flags |= TXD_FLAG_END;
3517 if (flags & TXD_FLAG_VLAN) {
3518 vlan_tag = flags >> 16;
3519 flags &= 0xffff;
3520 }
3521 vlan_tag |= (mss << TXD_MSS_SHIFT);
3522
3523 txd->addr_hi = ((u64) mapping >> 32);
3524 txd->addr_lo = ((u64) mapping & 0xffffffff);
3525 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3526 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3527}
3528
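/* The DMA engine apparently cannot handle a buffer that straddles a 4 GB
 * boundary.  Example: base = 0xffffe000, len = 0x2000 gives
 * base + len + 8 = 0x100000008, which wraps below base in 32 bits, so the
 * mapping crosses the boundary and tigon3_4gb_hwbug_workaround() bounces
 * the packet into a freshly copied linear skb.  The base > 0xffffdcc0
 * pre-test simply skips mappings that start at least 0x2340 (~9 KB) below
 * a boundary and therefore can never wrap for the supported frame sizes.
 */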
3529static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3530{
3531 u32 base = (u32) mapping & 0xffffffff;
3532
3533 return ((base > 0xffffdcc0) &&
3534 (base + len + 8 < base));
3535}
3536
3537static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3538{
3539 struct tg3 *tp = netdev_priv(dev);
3540 dma_addr_t mapping;
3541 unsigned int i;
3542 u32 len, entry, base_flags, mss;
3543 int would_hit_hwbug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544
3545 len = skb_headlen(skb);
3546
3547 /* No BH disabling for tx_lock here. We are running in BH disabled
3548 * context and TX reclaim runs via tp->poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07003549 * interrupt. Furthermore, IRQ processing runs lockless so we have
3550 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551 */
David S. Millerf47c11e2005-06-24 20:18:35 -07003552 if (!spin_trylock(&tp->tx_lock))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003553 return NETDEV_TX_LOCKED;
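	/* Presumably the LLTX contract here: NETDEV_TX_LOCKED tells the core
	 * that the driver's own lock was contended, and the queueing layer
	 * will requeue the skb and retry the transmit shortly.
	 */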
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554
3555 /* This is a hard error, log it. */
3556 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3557 netif_stop_queue(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07003558 spin_unlock(&tp->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003559 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3560 dev->name);
3561 return NETDEV_TX_BUSY;
3562 }
3563
3564 entry = tp->tx_prod;
3565 base_flags = 0;
3566 if (skb->ip_summed == CHECKSUM_HW)
3567 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3568#if TG3_TSO_SUPPORT != 0
3569 mss = 0;
3570 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3571 (mss = skb_shinfo(skb)->tso_size) != 0) {
3572 int tcp_opt_len, ip_tcp_len;
3573
3574 if (skb_header_cloned(skb) &&
3575 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3576 dev_kfree_skb(skb);
3577 goto out_unlock;
3578 }
3579
3580 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3581 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3582
3583 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3584 TXD_FLAG_CPU_POST_DMA);
3585
3586 skb->nh.iph->check = 0;
3587 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
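		/* With HW_TSO the chip computes the TCP checksum of every
		 * segment itself, so the field is simply cleared.  Otherwise
		 * seed th->check with the pseudo-header checksum (length 0)
		 * so the hardware can finish each segment's checksum
		 * incrementally.
		 */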
3588 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3589 skb->h.th->check = 0;
3590 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3591 }
3592 else {
3593 skb->h.th->check =
3594 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3595 skb->nh.iph->daddr,
3596 0, IPPROTO_TCP, 0);
3597 }
3598
3599 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3600 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3601 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3602 int tsflags;
3603
3604 tsflags = ((skb->nh.iph->ihl - 5) +
3605 (tcp_opt_len >> 2));
3606 mss |= (tsflags << 11);
3607 }
3608 } else {
3609 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3610 int tsflags;
3611
3612 tsflags = ((skb->nh.iph->ihl - 5) +
3613 (tcp_opt_len >> 2));
3614 base_flags |= tsflags << 12;
3615 }
3616 }
3617 }
3618#else
3619 mss = 0;
3620#endif
3621#if TG3_VLAN_TAG_USED
3622 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3623 base_flags |= (TXD_FLAG_VLAN |
3624 (vlan_tx_tag_get(skb) << 16));
3625#endif
3626
3627 /* Queue skb data, a.k.a. the main skb fragment. */
3628 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3629
3630 tp->tx_buffers[entry].skb = skb;
3631 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3632
3633 would_hit_hwbug = 0;
3634
3635 if (tg3_4g_overflow_test(mapping, len))
3636 would_hit_hwbug = entry + 1;
3637
3638 tg3_set_txd(tp, entry, mapping, len, base_flags,
3639 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3640
3641 entry = NEXT_TX(entry);
3642
3643 /* Now loop through additional data fragments, and queue them. */
3644 if (skb_shinfo(skb)->nr_frags > 0) {
3645 unsigned int i, last;
3646
3647 last = skb_shinfo(skb)->nr_frags - 1;
3648 for (i = 0; i <= last; i++) {
3649 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3650
3651 len = frag->size;
3652 mapping = pci_map_page(tp->pdev,
3653 frag->page,
3654 frag->page_offset,
3655 len, PCI_DMA_TODEVICE);
3656
3657 tp->tx_buffers[entry].skb = NULL;
3658 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3659
3660 if (tg3_4g_overflow_test(mapping, len)) {
3661 /* Only one should match. */
3662 if (would_hit_hwbug)
3663 BUG();
3664 would_hit_hwbug = entry + 1;
3665 }
3666
3667 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3668 tg3_set_txd(tp, entry, mapping, len,
3669 base_flags, (i == last)|(mss << 1));
3670 else
3671 tg3_set_txd(tp, entry, mapping, len,
3672 base_flags, (i == last));
3673
3674 entry = NEXT_TX(entry);
3675 }
3676 }
3677
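	/* At least one buffer queued above would cross a 4GB boundary.
	 * Walk the frame's descriptors to locate the offending entry and
	 * its length, then hand the whole frame to the workaround for
	 * re-mapping.
	 */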
3678 if (would_hit_hwbug) {
3679 u32 last_plus_one = entry;
3680 u32 start;
3681 unsigned int len = 0;
3682
3683 would_hit_hwbug -= 1;
3684 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3685 entry &= (TG3_TX_RING_SIZE - 1);
3686 start = entry;
3687 i = 0;
3688 while (entry != last_plus_one) {
3689 if (i == 0)
3690 len = skb_headlen(skb);
3691 else
3692 len = skb_shinfo(skb)->frags[i-1].size;
3693
3694 if (entry == would_hit_hwbug)
3695 break;
3696
3697 i++;
3698 entry = NEXT_TX(entry);
3699
3700 }
3701
3702 /* If the workaround fails due to memory/mapping
3703 * failure, silently drop this packet.
3704 */
3705 if (tigon3_4gb_hwbug_workaround(tp, skb,
3706 entry, len,
3707 last_plus_one,
3708 &start, mss))
3709 goto out_unlock;
3710
3711 entry = start;
3712 }
3713
3714 /* Packets are ready, update Tx producer idx local and on card. */
3715 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3716
3717 tp->tx_prod = entry;
Michael Chan51b91462005-09-01 17:41:28 -07003718 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 netif_stop_queue(dev);
Michael Chan51b91462005-09-01 17:41:28 -07003720 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3721 netif_wake_queue(tp->dev);
3722 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723
3724out_unlock:
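	/* mmiowb() keeps the mailbox write ordered ahead of the unlock, so
	 * a CPU that subsequently takes tx_lock sees the producer update
	 * first.
	 */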
3725 mmiowb();
David S. Millerf47c11e2005-06-24 20:18:35 -07003726 spin_unlock(&tp->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727
3728 dev->trans_start = jiffies;
3729
3730 return NETDEV_TX_OK;
3731}
3732
3733static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3734 int new_mtu)
3735{
3736 dev->mtu = new_mtu;
3737
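	/* On the 5780 a jumbo MTU is handled on the standard ring with
	 * larger buffers and apparently cannot be combined with TSO, so TSO
	 * is turned off here; other jumbo-capable chips enable the separate
	 * jumbo ring instead.
	 */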
Michael Chanef7f5ec2005-07-25 12:32:25 -07003738 if (new_mtu > ETH_DATA_LEN) {
3739 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
3740 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3741 ethtool_op_set_tso(dev, 0);
3742 }
3743 else
3744 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3745 } else {
3746 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
3747 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07003748 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07003749 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750}
3751
3752static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3753{
3754 struct tg3 *tp = netdev_priv(dev);
3755
3756 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3757 return -EINVAL;
3758
3759 if (!netif_running(dev)) {
3760 /* We'll just catch it later when the
3761 * device is up'd.
3762 */
3763 tg3_set_mtu(dev, tp, new_mtu);
3764 return 0;
3765 }
3766
3767 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07003768
3769 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770
Michael Chan944d9802005-05-29 14:57:48 -07003771 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003772
3773 tg3_set_mtu(dev, tp, new_mtu);
3774
3775 tg3_init_hw(tp);
3776
3777 tg3_netif_start(tp);
3778
David S. Millerf47c11e2005-06-24 20:18:35 -07003779 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780
3781 return 0;
3782}
3783
3784/* Free up pending packets in all rx/tx rings.
3785 *
3786 * The chip has been shut down and the driver detached from
3787 * the networking, so no interrupts or new tx packets will
3788 * end up in the driver. tp->{tx,}lock is not held and we are not
3789 * in an interrupt context and thus may sleep.
3790 */
3791static void tg3_free_rings(struct tg3 *tp)
3792{
3793 struct ring_info *rxp;
3794 int i;
3795
3796 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3797 rxp = &tp->rx_std_buffers[i];
3798
3799 if (rxp->skb == NULL)
3800 continue;
3801 pci_unmap_single(tp->pdev,
3802 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07003803 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804 PCI_DMA_FROMDEVICE);
3805 dev_kfree_skb_any(rxp->skb);
3806 rxp->skb = NULL;
3807 }
3808
3809 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3810 rxp = &tp->rx_jumbo_buffers[i];
3811
3812 if (rxp->skb == NULL)
3813 continue;
3814 pci_unmap_single(tp->pdev,
3815 pci_unmap_addr(rxp, mapping),
3816 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3817 PCI_DMA_FROMDEVICE);
3818 dev_kfree_skb_any(rxp->skb);
3819 rxp->skb = NULL;
3820 }
3821
3822 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3823 struct tx_ring_info *txp;
3824 struct sk_buff *skb;
3825 int j;
3826
3827 txp = &tp->tx_buffers[i];
3828 skb = txp->skb;
3829
3830 if (skb == NULL) {
3831 i++;
3832 continue;
3833 }
3834
3835 pci_unmap_single(tp->pdev,
3836 pci_unmap_addr(txp, mapping),
3837 skb_headlen(skb),
3838 PCI_DMA_TODEVICE);
3839 txp->skb = NULL;
3840
3841 i++;
3842
3843 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3844 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3845 pci_unmap_page(tp->pdev,
3846 pci_unmap_addr(txp, mapping),
3847 skb_shinfo(skb)->frags[j].size,
3848 PCI_DMA_TODEVICE);
3849 i++;
3850 }
3851
3852 dev_kfree_skb_any(skb);
3853 }
3854}
3855
3856/* Initialize tx/rx rings for packet processing.
3857 *
3858 * The chip has been shut down and the driver detached from
3859 * the networking, so no interrupts or new tx packets will
3860 * end up in the driver. tp->{tx,}lock are held and thus
3861 * we may not sleep.
3862 */
3863static void tg3_init_rings(struct tg3 *tp)
3864{
3865 u32 i;
3866
3867 /* Free up all the SKBs. */
3868 tg3_free_rings(tp);
3869
3870 /* Zero out all descriptors. */
3871 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3872 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3873 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3874 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3875
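	/* The 5780 does not use the separate jumbo ring (see tg3_set_mtu());
	 * with a jumbo MTU it posts jumbo-sized buffers on the standard ring.
	 */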
Michael Chan7e72aad2005-07-25 12:31:17 -07003876 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3877 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) &&
3878 (tp->dev->mtu > ETH_DATA_LEN))
3879 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3880
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881	/* Initialize invariants of the rings; we only set this
 3882	 * stuff once.  This works because the card does not
3883 * write into the rx buffer posting rings.
3884 */
3885 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3886 struct tg3_rx_buffer_desc *rxd;
3887
3888 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07003889 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890 << RXD_LEN_SHIFT;
3891 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3892 rxd->opaque = (RXD_OPAQUE_RING_STD |
3893 (i << RXD_OPAQUE_INDEX_SHIFT));
3894 }
3895
Michael Chan0f893dc2005-07-25 12:30:38 -07003896 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3898 struct tg3_rx_buffer_desc *rxd;
3899
3900 rxd = &tp->rx_jumbo[i];
3901 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3902 << RXD_LEN_SHIFT;
3903 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3904 RXD_FLAG_JUMBO;
3905 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3906 (i << RXD_OPAQUE_INDEX_SHIFT));
3907 }
3908 }
3909
3910 /* Now allocate fresh SKBs for each rx ring. */
3911 for (i = 0; i < tp->rx_pending; i++) {
3912 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3913 -1, i) < 0)
3914 break;
3915 }
3916
Michael Chan0f893dc2005-07-25 12:30:38 -07003917 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003918 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3919 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3920 -1, i) < 0)
3921 break;
3922 }
3923 }
3924}
3925
3926/*
 3927 * Must only be invoked with interrupt sources disabled and
 3928 * the hardware shut down.
3929 */
3930static void tg3_free_consistent(struct tg3 *tp)
3931{
3932 if (tp->rx_std_buffers) {
3933 kfree(tp->rx_std_buffers);
3934 tp->rx_std_buffers = NULL;
3935 }
3936 if (tp->rx_std) {
3937 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3938 tp->rx_std, tp->rx_std_mapping);
3939 tp->rx_std = NULL;
3940 }
3941 if (tp->rx_jumbo) {
3942 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3943 tp->rx_jumbo, tp->rx_jumbo_mapping);
3944 tp->rx_jumbo = NULL;
3945 }
3946 if (tp->rx_rcb) {
3947 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3948 tp->rx_rcb, tp->rx_rcb_mapping);
3949 tp->rx_rcb = NULL;
3950 }
3951 if (tp->tx_ring) {
3952 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3953 tp->tx_ring, tp->tx_desc_mapping);
3954 tp->tx_ring = NULL;
3955 }
3956 if (tp->hw_status) {
3957 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3958 tp->hw_status, tp->status_mapping);
3959 tp->hw_status = NULL;
3960 }
3961 if (tp->hw_stats) {
3962 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3963 tp->hw_stats, tp->stats_mapping);
3964 tp->hw_stats = NULL;
3965 }
3966}
3967
3968/*
 3969 * Must only be invoked with interrupt sources disabled and
 3970 * the hardware shut down.  Can sleep.
3971 */
3972static int tg3_alloc_consistent(struct tg3 *tp)
3973{
3974 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3975 (TG3_RX_RING_SIZE +
3976 TG3_RX_JUMBO_RING_SIZE)) +
3977 (sizeof(struct tx_ring_info) *
3978 TG3_TX_RING_SIZE),
3979 GFP_KERNEL);
3980 if (!tp->rx_std_buffers)
3981 return -ENOMEM;
3982
3983 memset(tp->rx_std_buffers, 0,
3984 (sizeof(struct ring_info) *
3985 (TG3_RX_RING_SIZE +
3986 TG3_RX_JUMBO_RING_SIZE)) +
3987 (sizeof(struct tx_ring_info) *
3988 TG3_TX_RING_SIZE));
3989
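	/* The jumbo rx and tx ring_info arrays are carved out of the single
	 * allocation made above, right after the standard rx array.
	 */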
3990 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3991 tp->tx_buffers = (struct tx_ring_info *)
3992 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3993
3994 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3995 &tp->rx_std_mapping);
3996 if (!tp->rx_std)
3997 goto err_out;
3998
3999 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4000 &tp->rx_jumbo_mapping);
4001
4002 if (!tp->rx_jumbo)
4003 goto err_out;
4004
4005 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4006 &tp->rx_rcb_mapping);
4007 if (!tp->rx_rcb)
4008 goto err_out;
4009
4010 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4011 &tp->tx_desc_mapping);
4012 if (!tp->tx_ring)
4013 goto err_out;
4014
4015 tp->hw_status = pci_alloc_consistent(tp->pdev,
4016 TG3_HW_STATUS_SIZE,
4017 &tp->status_mapping);
4018 if (!tp->hw_status)
4019 goto err_out;
4020
4021 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4022 sizeof(struct tg3_hw_stats),
4023 &tp->stats_mapping);
4024 if (!tp->hw_stats)
4025 goto err_out;
4026
4027 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4028 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4029
4030 return 0;
4031
4032err_out:
4033 tg3_free_consistent(tp);
4034 return -ENOMEM;
4035}
4036
4037#define MAX_WAIT_CNT 1000
4038
4039/* To stop a block, clear the enable bit and poll till it
4040 * clears. tp->lock is held.
4041 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004042static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043{
4044 unsigned int i;
4045 u32 val;
4046
4047 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4048 switch (ofs) {
4049 case RCVLSC_MODE:
4050 case DMAC_MODE:
4051 case MBFREE_MODE:
4052 case BUFMGR_MODE:
4053 case MEMARB_MODE:
 4054			/* We can't enable/disable these bits on the
 4055			 * 5705/5750, so just report success.
4056 */
4057 return 0;
4058
4059 default:
4060 break;
 4061		}
4062 }
4063
4064 val = tr32(ofs);
4065 val &= ~enable_bit;
4066 tw32_f(ofs, val);
4067
4068 for (i = 0; i < MAX_WAIT_CNT; i++) {
4069 udelay(100);
4070 val = tr32(ofs);
4071 if ((val & enable_bit) == 0)
4072 break;
4073 }
4074
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004075 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4077 "ofs=%lx enable_bit=%x\n",
4078 ofs, enable_bit);
4079 return -ENODEV;
4080 }
4081
4082 return 0;
4083}
4084
4085/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004086static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087{
4088 int i, err;
4089
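	/* Disable interrupts, then stop the receive blocks, the send blocks,
	 * and finally the host-facing blocks (coalescing, DMA, buffer manager,
	 * memory arbiter), clearing each enable bit and polling for it to
	 * drop.
	 */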
4090 tg3_disable_ints(tp);
4091
4092 tp->rx_mode &= ~RX_MODE_ENABLE;
4093 tw32_f(MAC_RX_MODE, tp->rx_mode);
4094 udelay(10);
4095
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004096 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4097 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4098 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4099 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4100 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4101 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004103 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4104 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4105 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4106 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4107 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4108 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4109 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110
4111 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4112 tw32_f(MAC_MODE, tp->mac_mode);
4113 udelay(40);
4114
4115 tp->tx_mode &= ~TX_MODE_ENABLE;
4116 tw32_f(MAC_TX_MODE, tp->tx_mode);
4117
4118 for (i = 0; i < MAX_WAIT_CNT; i++) {
4119 udelay(100);
4120 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4121 break;
4122 }
4123 if (i >= MAX_WAIT_CNT) {
4124 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4125 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4126 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07004127 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 }
4129
Michael Chane6de8ad2005-05-05 14:42:41 -07004130 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004131 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4132 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133
4134 tw32(FTQ_RESET, 0xffffffff);
4135 tw32(FTQ_RESET, 0x00000000);
4136
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004137 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4138 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004139
4140 if (tp->hw_status)
4141 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4142 if (tp->hw_stats)
4143 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4144
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145 return err;
4146}
4147
4148/* tp->lock is held. */
4149static int tg3_nvram_lock(struct tg3 *tp)
4150{
4151 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4152 int i;
4153
4154 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4155 for (i = 0; i < 8000; i++) {
4156 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4157 break;
4158 udelay(20);
4159 }
4160 if (i == 8000)
4161 return -ENODEV;
4162 }
4163 return 0;
4164}
4165
4166/* tp->lock is held. */
4167static void tg3_nvram_unlock(struct tg3 *tp)
4168{
4169 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4170 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4171}
4172
4173/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07004174static void tg3_enable_nvram_access(struct tg3 *tp)
4175{
4176 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4177 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4178 u32 nvaccess = tr32(NVRAM_ACCESS);
4179
4180 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4181 }
4182}
4183
4184/* tp->lock is held. */
4185static void tg3_disable_nvram_access(struct tg3 *tp)
4186{
4187 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4188 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4189 u32 nvaccess = tr32(NVRAM_ACCESS);
4190
4191 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4192 }
4193}
4194
4195/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4197{
4198 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4199 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4200 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4201
4202 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4203 switch (kind) {
4204 case RESET_KIND_INIT:
4205 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4206 DRV_STATE_START);
4207 break;
4208
4209 case RESET_KIND_SHUTDOWN:
4210 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4211 DRV_STATE_UNLOAD);
4212 break;
4213
4214 case RESET_KIND_SUSPEND:
4215 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4216 DRV_STATE_SUSPEND);
4217 break;
4218
4219 default:
4220 break;
 4221		}
4222 }
4223}
4224
4225/* tp->lock is held. */
4226static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4227{
4228 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4229 switch (kind) {
4230 case RESET_KIND_INIT:
4231 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4232 DRV_STATE_START_DONE);
4233 break;
4234
4235 case RESET_KIND_SHUTDOWN:
4236 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4237 DRV_STATE_UNLOAD_DONE);
4238 break;
4239
4240 default:
4241 break;
 4242		}
4243 }
4244}
4245
4246/* tp->lock is held. */
4247static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4248{
4249 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4250 switch (kind) {
4251 case RESET_KIND_INIT:
4252 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4253 DRV_STATE_START);
4254 break;
4255
4256 case RESET_KIND_SHUTDOWN:
4257 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4258 DRV_STATE_UNLOAD);
4259 break;
4260
4261 case RESET_KIND_SUSPEND:
4262 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4263 DRV_STATE_SUSPEND);
4264 break;
4265
4266 default:
4267 break;
 4268		}
4269 }
4270}
4271
4272static void tg3_stop_fw(struct tg3 *);
4273
4274/* tp->lock is held. */
4275static int tg3_chip_reset(struct tg3 *tp)
4276{
4277 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07004278 void (*write_op)(struct tg3 *, u32, u32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 int i;
4280
4281 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4282 tg3_nvram_lock(tp);
4283
4284 /*
 4285	 * We must avoid the readl() that the write-flushing register
 4286	 * op normally does.  It locks up machines, causes machine checks,
 4287	 * and other fun things.  So, temporarily disable the 5701
 4288	 * hardware workaround while we do the reset.
4289 */
Michael Chan1ee582d2005-08-09 20:16:46 -07004290 write_op = tp->write32;
4291 if (write_op == tg3_write_flush_reg32)
4292 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293
4294 /* do the reset */
4295 val = GRC_MISC_CFG_CORECLK_RESET;
4296
4297 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4298 if (tr32(0x7e2c) == 0x60) {
4299 tw32(0x7e2c, 0x20);
4300 }
4301 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4302 tw32(GRC_MISC_CFG, (1 << 29));
4303 val |= (1 << 29);
4304 }
4305 }
4306
4307 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4308 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4309 tw32(GRC_MISC_CFG, val);
4310
Michael Chan1ee582d2005-08-09 20:16:46 -07004311 /* restore 5701 hardware bug workaround write method */
4312 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313
4314 /* Unfortunately, we have to delay before the PCI read back.
 4315	 * Some 575X chips will not even respond to a PCI cfg access
4316 * when the reset command is given to the chip.
4317 *
4318 * How do these hardware designers expect things to work
4319 * properly if the PCI write is posted for a long period
 4320	 * of time?  It is always necessary to have some method by
 4321	 * which a register read back can occur to push out the
 4322	 * write that performs the reset.
4323 *
4324 * For most tg3 variants the trick below was working.
4325 * Ho hum...
4326 */
4327 udelay(120);
4328
4329 /* Flush PCI posted writes. The normal MMIO registers
4330 * are inaccessible at this time so this is the only
 4331	 * are inaccessible at this time, so this is the only
 4332	 * way to do this reliably (actually, this is no longer
4333 * register read/write but this upset some 5701 variants.
4334 */
4335 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4336
4337 udelay(120);
4338
4339 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4340 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4341 int i;
4342 u32 cfg_val;
4343
4344 /* Wait for link training to complete. */
4345 for (i = 0; i < 5000; i++)
4346 udelay(100);
4347
4348 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4349 pci_write_config_dword(tp->pdev, 0xc4,
4350 cfg_val | (1 << 15));
4351 }
4352 /* Set PCIE max payload size and clear error status. */
4353 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4354 }
4355
4356 /* Re-enable indirect register accesses. */
4357 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4358 tp->misc_host_ctrl);
4359
4360 /* Set MAX PCI retry to zero. */
4361 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4362 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4363 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4364 val |= PCISTATE_RETRY_SAME_DMA;
4365 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4366
4367 pci_restore_state(tp->pdev);
4368
4369 /* Make sure PCI-X relaxed ordering bit is clear. */
4370 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4371 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4372 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4373
Michael Chan4cf78e42005-07-25 12:29:19 -07004374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4375 u32 val;
4376
4377 /* Chip reset on 5780 will reset MSI enable bit,
 4378	 * so we need to restore it.
4379 */
4380 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4381 u16 ctrl;
4382
4383 pci_read_config_word(tp->pdev,
4384 tp->msi_cap + PCI_MSI_FLAGS,
4385 &ctrl);
4386 pci_write_config_word(tp->pdev,
4387 tp->msi_cap + PCI_MSI_FLAGS,
4388 ctrl | PCI_MSI_FLAGS_ENABLE);
4389 val = tr32(MSGINT_MODE);
4390 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4391 }
4392
4393 val = tr32(MEMARB_MODE);
4394 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4395
4396 } else
4397 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004398
4399 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4400 tg3_stop_fw(tp);
4401 tw32(0x5000, 0x400);
4402 }
4403
4404 tw32(GRC_MODE, tp->grc_mode);
4405
4406 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4407 u32 val = tr32(0xc4);
4408
4409 tw32(0xc4, val | (1 << 15));
4410 }
4411
4412 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4413 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4414 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4415 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4416 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4417 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4418 }
4419
4420 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4421 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4422 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07004423 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4424 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4425 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426 } else
4427 tw32_f(MAC_MODE, 0);
4428 udelay(40);
4429
4430 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4431 /* Wait for firmware initialization to complete. */
4432 for (i = 0; i < 100000; i++) {
4433 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4434 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4435 break;
4436 udelay(10);
4437 }
4438 if (i >= 100000) {
4439 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4440 "firmware will not restart magic=%08x\n",
4441 tp->dev->name, val);
4442 return -ENODEV;
4443 }
4444 }
4445
4446 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4447 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4448 u32 val = tr32(0x7c00);
4449
4450 tw32(0x7c00, val | (1 << 25));
4451 }
4452
4453 /* Reprobe ASF enable state. */
4454 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4455 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4456 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4457 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4458 u32 nic_cfg;
4459
4460 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4461 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4462 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07004463 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004464 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4465 }
4466 }
4467
4468 return 0;
4469}
4470
4471/* tp->lock is held. */
4472static void tg3_stop_fw(struct tg3 *tp)
4473{
4474 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4475 u32 val;
4476 int i;
4477
4478 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4479 val = tr32(GRC_RX_CPU_EVENT);
4480 val |= (1 << 14);
4481 tw32(GRC_RX_CPU_EVENT, val);
4482
4483 /* Wait for RX cpu to ACK the event. */
4484 for (i = 0; i < 100; i++) {
4485 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4486 break;
4487 udelay(1);
4488 }
4489 }
4490}
4491
4492/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07004493static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494{
4495 int err;
4496
4497 tg3_stop_fw(tp);
4498
Michael Chan944d9802005-05-29 14:57:48 -07004499 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004501 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502 err = tg3_chip_reset(tp);
4503
Michael Chan944d9802005-05-29 14:57:48 -07004504 tg3_write_sig_legacy(tp, kind);
4505 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506
4507 if (err)
4508 return err;
4509
4510 return 0;
4511}
4512
4513#define TG3_FW_RELEASE_MAJOR 0x0
4514#define TG3_FW_RELASE_MINOR 0x0
4515#define TG3_FW_RELEASE_FIX 0x0
4516#define TG3_FW_START_ADDR 0x08000000
4517#define TG3_FW_TEXT_ADDR 0x08000000
4518#define TG3_FW_TEXT_LEN 0x9c0
4519#define TG3_FW_RODATA_ADDR 0x080009c0
4520#define TG3_FW_RODATA_LEN 0x60
4521#define TG3_FW_DATA_ADDR 0x08000a40
4522#define TG3_FW_DATA_LEN 0x20
4523#define TG3_FW_SBSS_ADDR 0x08000a60
4524#define TG3_FW_SBSS_LEN 0xc
4525#define TG3_FW_BSS_ADDR 0x08000a70
4526#define TG3_FW_BSS_LEN 0x10
4527
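/* Firmware image used by tg3_load_5701_a0_firmware_fix() below for
 * 5701 A0 parts.  The words are raw instruction text for the on-chip
 * CPU (apparently MIPS code).
 */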
4528static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4529 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4530 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4531 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4532 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4533 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4534 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4535 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4536 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4537 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4538 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4539 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4540 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4541 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4542 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4543 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4544 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4545 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4546 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4547 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4548 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4549 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4550 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4551 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4552 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4553 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4554 0, 0, 0, 0, 0, 0,
4555 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4556 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4557 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4558 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4559 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4560 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4561 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4562 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4563 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4564 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4565 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4566 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4567 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4568 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4569 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4570 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4571 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4572 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4573 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4574 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4575 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4576 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4577 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4578 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4579 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4580 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4581 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4582 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4583 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4584 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4585 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4586 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4587 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4588 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4589 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4590 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4591 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4592 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4593 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4594 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4595 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4596 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4597 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4598 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4599 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4600 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4601 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4602 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4603 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4604 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4605 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4606 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4607 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4608 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4609 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4610 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4611 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4612 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4613 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4614 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4615 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4616 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4617 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4618 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4619 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4620};
4621
4622static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4623 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4624 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4625 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4626 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4627 0x00000000
4628};
4629
4630#if 0 /* All zeros, don't eat up space with it. */
4631u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4632 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4633 0x00000000, 0x00000000, 0x00000000, 0x00000000
4634};
4635#endif
4636
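/* Windows in on-chip scratch memory where tg3_load_firmware_cpu() stages
 * the RX and TX CPU firmware images.
 */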
4637#define RX_CPU_SCRATCH_BASE 0x30000
4638#define RX_CPU_SCRATCH_SIZE 0x04000
4639#define TX_CPU_SCRATCH_BASE 0x34000
4640#define TX_CPU_SCRATCH_SIZE 0x04000
4641
4642/* tp->lock is held. */
4643static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4644{
4645 int i;
4646
4647 if (offset == TX_CPU_BASE &&
4648 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4649 BUG();
4650
4651 if (offset == RX_CPU_BASE) {
4652 for (i = 0; i < 10000; i++) {
4653 tw32(offset + CPU_STATE, 0xffffffff);
4654 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4655 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4656 break;
4657 }
4658
4659 tw32(offset + CPU_STATE, 0xffffffff);
4660 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4661 udelay(10);
4662 } else {
4663 for (i = 0; i < 10000; i++) {
4664 tw32(offset + CPU_STATE, 0xffffffff);
4665 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4666 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4667 break;
4668 }
4669 }
4670
4671 if (i >= 10000) {
4672 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4673 "and %s CPU\n",
4674 tp->dev->name,
4675 (offset == RX_CPU_BASE ? "RX" : "TX"));
4676 return -ENODEV;
4677 }
4678 return 0;
4679}
4680
4681struct fw_info {
4682 unsigned int text_base;
4683 unsigned int text_len;
4684 u32 *text_data;
4685 unsigned int rodata_base;
4686 unsigned int rodata_len;
4687 u32 *rodata_data;
4688 unsigned int data_base;
4689 unsigned int data_len;
4690 u32 *data_data;
4691};
4692
4693/* tp->lock is held. */
4694static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4695 int cpu_scratch_size, struct fw_info *info)
4696{
4697 int err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698 void (*write_op)(struct tg3 *, u32, u32);
4699
4700 if (cpu_base == TX_CPU_BASE &&
4701 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4702 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4703 "TX cpu firmware on %s which is 5705.\n",
4704 tp->dev->name);
4705 return -EINVAL;
4706 }
4707
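	/* 5705 and newer parts take the image through NIC-memory writes;
	 * older chips are written via indirect register accesses.
	 */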
4708 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4709 write_op = tg3_write_mem;
4710 else
4711 write_op = tg3_write_indirect_reg32;
4712
Michael Chan1b628152005-05-29 14:59:49 -07004713 /* It is possible that bootcode is still loading at this point.
 4714	 * Get the NVRAM lock before halting the CPU.
4715 */
4716 tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717 err = tg3_halt_cpu(tp, cpu_base);
Michael Chan1b628152005-05-29 14:59:49 -07004718 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719 if (err)
4720 goto out;
4721
4722 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4723 write_op(tp, cpu_scratch_base + i, 0);
4724 tw32(cpu_base + CPU_STATE, 0xffffffff);
4725 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4726 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4727 write_op(tp, (cpu_scratch_base +
4728 (info->text_base & 0xffff) +
4729 (i * sizeof(u32))),
4730 (info->text_data ?
4731 info->text_data[i] : 0));
4732 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4733 write_op(tp, (cpu_scratch_base +
4734 (info->rodata_base & 0xffff) +
4735 (i * sizeof(u32))),
4736 (info->rodata_data ?
4737 info->rodata_data[i] : 0));
4738 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4739 write_op(tp, (cpu_scratch_base +
4740 (info->data_base & 0xffff) +
4741 (i * sizeof(u32))),
4742 (info->data_data ?
4743 info->data_data[i] : 0));
4744
4745 err = 0;
4746
4747out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748 return err;
4749}
4750
4751/* tp->lock is held. */
4752static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4753{
4754 struct fw_info info;
4755 int err, i;
4756
4757 info.text_base = TG3_FW_TEXT_ADDR;
4758 info.text_len = TG3_FW_TEXT_LEN;
4759 info.text_data = &tg3FwText[0];
4760 info.rodata_base = TG3_FW_RODATA_ADDR;
4761 info.rodata_len = TG3_FW_RODATA_LEN;
4762 info.rodata_data = &tg3FwRodata[0];
4763 info.data_base = TG3_FW_DATA_ADDR;
4764 info.data_len = TG3_FW_DATA_LEN;
4765 info.data_data = NULL;
4766
4767 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4768 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4769 &info);
4770 if (err)
4771 return err;
4772
4773 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4774 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4775 &info);
4776 if (err)
4777 return err;
4778
4779 /* Now startup only the RX cpu. */
4780 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4781 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4782
4783 for (i = 0; i < 5; i++) {
4784 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4785 break;
4786 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4787 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4788 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4789 udelay(1000);
4790 }
4791 if (i >= 5) {
4792 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4793 "to set RX CPU PC, is %08x should be %08x\n",
4794 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4795 TG3_FW_TEXT_ADDR);
4796 return -ENODEV;
4797 }
4798 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4799 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4800
4801 return 0;
4802}
4803
4804#if TG3_TSO_SUPPORT != 0
4805
4806#define TG3_TSO_FW_RELEASE_MAJOR 0x1
4807#define TG3_TSO_FW_RELASE_MINOR 0x6
4808#define TG3_TSO_FW_RELEASE_FIX 0x0
4809#define TG3_TSO_FW_START_ADDR 0x08000000
4810#define TG3_TSO_FW_TEXT_ADDR 0x08000000
4811#define TG3_TSO_FW_TEXT_LEN 0x1aa0
4812#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4813#define TG3_TSO_FW_RODATA_LEN 0x60
4814#define TG3_TSO_FW_DATA_ADDR 0x08001b20
4815#define TG3_TSO_FW_DATA_LEN 0x30
4816#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4817#define TG3_TSO_FW_SBSS_LEN 0x2c
4818#define TG3_TSO_FW_BSS_ADDR 0x08001b80
4819#define TG3_TSO_FW_BSS_LEN 0x894
4820
4821static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4822 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4823 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4824 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4825 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4826 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4827 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4828 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4829 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4830 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4831 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4832 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4833 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4834 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4835 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4836 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4837 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4838 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4839 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4840 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4841 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4842 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4843 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4844 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4845 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4846 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4847 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4848 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4849 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4850 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4851 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4852 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4853 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4854 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4855 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4856 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4857 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4858 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4859 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4860 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4861 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4862 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4863 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4864 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4865 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4866 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4867 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4868 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4869 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4870 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4871 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4872 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4873 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4874 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4875 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4876 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4877 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4878 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4879 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4880 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4881 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4882 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4883 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4884 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4885 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4886 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4887 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4888 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4889 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4890 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4891 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4892 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4893 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4894 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4895 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4896 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4897 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4898 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4899 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4900 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4901 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4902 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4903 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4904 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4905 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4906 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4907 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4908 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4909 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4910 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4911 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4912 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4913 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4914 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4915 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4916 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4917 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4918 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4919 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4920 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4921 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4922 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4923 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4924 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4925 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4926 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4927 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4928 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4929 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4930 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4931 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4932 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4933 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4934 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4935 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4936 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4937 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4938 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4939 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4940 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4941 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4942 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4943 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4944 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4945 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4946 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4947 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4948 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4949 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4950 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4951 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4952 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4953 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4954 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4955 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4956 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4957 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4958 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4959 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4960 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4961 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4962 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4963 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4964 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4965 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4966 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4967 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4968 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4969 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4970 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4971 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4972 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4973 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4974 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4975 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4976 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4977 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4978 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4979 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4980 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4981 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4982 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4983 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4984 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4985 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4986 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4987 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4988 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4989 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4990 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4991 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4992 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4993 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4994 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4995 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4996 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4997 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4998 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4999 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5000 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5001 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5002 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5003 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5004 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5005 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5006 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5007 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5008 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5009 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5010 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5011 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5012 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5013 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5014 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5015 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5016 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5017 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5018 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5019 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5020 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5021 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5022 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5023 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5024 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5025 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5026 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5027 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5028 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5029 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5030 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5031 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5032 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5033 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5034 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5035 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5036 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5037 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5038 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5039 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5040 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5041 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5042 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5043 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5044 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5045 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5046 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5047 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5048 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5049 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5050 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5051 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5052 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5053 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5054 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5055 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5056 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5057 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5058 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5059 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5060 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5061 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5062 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5063 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5064 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5065 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5066 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5067 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5068 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5069 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5070 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5071 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5072 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5073 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5074 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5075 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5076 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5077 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5078 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5079 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5080 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5081 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5082 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5083 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5084 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5085 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5086 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5087 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5088 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5089 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5090 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5091 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5092 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5093 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5094 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5095 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5096 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5097 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5098 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5099 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5100 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5101 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5102 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5103 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5104 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5105 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5106};
5107
5108static u32 tg3TsoFwRodata[] = {
5109 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5110 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5111 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5112 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5113 0x00000000,
5114};
5115
5116static u32 tg3TsoFwData[] = {
5117 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5118 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5119 0x00000000,
5120};
5121
5122/* 5705 needs a special version of the TSO firmware. */
5123#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5124#define TG3_TSO5_FW_RELEASE_MINOR	0x2
5125#define TG3_TSO5_FW_RELEASE_FIX 0x0
5126#define TG3_TSO5_FW_START_ADDR 0x00010000
5127#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5128#define TG3_TSO5_FW_TEXT_LEN 0xe90
5129#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5130#define TG3_TSO5_FW_RODATA_LEN 0x50
5131#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5132#define TG3_TSO5_FW_DATA_LEN 0x20
5133#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5134#define TG3_TSO5_FW_SBSS_LEN 0x28
5135#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5136#define TG3_TSO5_FW_BSS_LEN 0x88
5137
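/* Illustrative note (not from the original source): the 5705 TSO firmware
 * image is one contiguous region -- text, rodata and data as defined above,
 * followed by sbss and bss.  tg3_load_tso_firmware() below sums these same
 * lengths to size the scratch area it carves out of the 5705 mbuf pool, so
 * the two must stay in agreement if the image is ever regenerated.
 */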
5138static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5139 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5140 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5141 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5142 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5143 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5144 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5145 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5146 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5147 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5148 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5149 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5150 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5151 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5152 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5153 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5154 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5155 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5156 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5157 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5158 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5159 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5160 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5161 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5162 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5163 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5164 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5165 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5166 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5167 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5168 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5169 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5170 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5171 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5172 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5173 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5174 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5175 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5176 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5177 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5178 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5179 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5180 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5181 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5182 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5183 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5184 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5185 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5186 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5187 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5188 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5189 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5190 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5191 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5192 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5193 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5194 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5195 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5196 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5197 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5198 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5199 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5200 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5201 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5202 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5203 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5204 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5205 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5206 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5207 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5208 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5209 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5210 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5211 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5212 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5213 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5214 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5215 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5216 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5217 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5218 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5219 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5220 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5221 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5222 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5223 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5224 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5225 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5226 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5227 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5228 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5229 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5230 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5231 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5232 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5233 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5234 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5235 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5236 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5237 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5238 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5239 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5240 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5241 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5242 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5243 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5244 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5245 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5246 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5247 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5248 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5249 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5250 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5251 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5252 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5253 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5254 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5255 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5256 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5257 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5258 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5259 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5260 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5261 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5262 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5263 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5264 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5265 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5266 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5267 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5268 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5269 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5270 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5271 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5272 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5273 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5274 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5275 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5276 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5277 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5278 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5279 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5280 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5281 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5282 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5283 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5284 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5285 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5286 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5287 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5288 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5289 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5290 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5291 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5292 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5293 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5294 0x00000000, 0x00000000, 0x00000000,
5295};
5296
5297static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5298 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5299 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5300 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5301 0x00000000, 0x00000000, 0x00000000,
5302};
5303
5304static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5305 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5306 0x00000000, 0x00000000, 0x00000000,
5307};
5308
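/* Load the TSO firmware into the NIC's on-chip CPU.  The 5705 gets the
 * special image above, run on the RX CPU and staged in the mbuf-pool scratch
 * area; every other chip gets the standard image loaded into the TX CPU's
 * scratch memory.  Chips with hardware TSO (TG3_FLG2_HW_TSO) skip the
 * download entirely.
 */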
5309/* tp->lock is held. */
5310static int tg3_load_tso_firmware(struct tg3 *tp)
5311{
5312 struct fw_info info;
5313 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5314 int err, i;
5315
5316 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5317 return 0;
5318
5319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5320 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5321 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5322 info.text_data = &tg3Tso5FwText[0];
5323 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5324 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5325 info.rodata_data = &tg3Tso5FwRodata[0];
5326 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5327 info.data_len = TG3_TSO5_FW_DATA_LEN;
5328 info.data_data = &tg3Tso5FwData[0];
5329 cpu_base = RX_CPU_BASE;
5330 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5331 cpu_scratch_size = (info.text_len +
5332 info.rodata_len +
5333 info.data_len +
5334 TG3_TSO5_FW_SBSS_LEN +
5335 TG3_TSO5_FW_BSS_LEN);
5336 } else {
5337 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5338 info.text_len = TG3_TSO_FW_TEXT_LEN;
5339 info.text_data = &tg3TsoFwText[0];
5340 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5341 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5342 info.rodata_data = &tg3TsoFwRodata[0];
5343 info.data_base = TG3_TSO_FW_DATA_ADDR;
5344 info.data_len = TG3_TSO_FW_DATA_LEN;
5345 info.data_data = &tg3TsoFwData[0];
5346 cpu_base = TX_CPU_BASE;
5347 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5348 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5349 }
5350
5351 err = tg3_load_firmware_cpu(tp, cpu_base,
5352 cpu_scratch_base, cpu_scratch_size,
5353 &info);
5354 if (err)
5355 return err;
5356
5357	/* Now start up the CPU. */
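	/* Point the CPU's program counter at the start of the firmware text
	 * and poll until the write sticks.  If it does not take after a few
	 * halt-and-retry attempts, the load is treated as failed.
	 */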
5358 tw32(cpu_base + CPU_STATE, 0xffffffff);
5359 tw32_f(cpu_base + CPU_PC, info.text_base);
5360
5361 for (i = 0; i < 5; i++) {
5362 if (tr32(cpu_base + CPU_PC) == info.text_base)
5363 break;
5364 tw32(cpu_base + CPU_STATE, 0xffffffff);
5365 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5366 tw32_f(cpu_base + CPU_PC, info.text_base);
5367 udelay(1000);
5368 }
5369 if (i >= 5) {
5370 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5371 "to set CPU PC, is %08x should be %08x\n",
5372 tp->dev->name, tr32(cpu_base + CPU_PC),
5373 info.text_base);
5374 return -ENODEV;
5375 }
5376 tw32(cpu_base + CPU_STATE, 0xffffffff);
5377 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5378 return 0;
5379}
5380
5381#endif /* TG3_TSO_SUPPORT != 0 */
5382
5383/* tp->lock is held. */
5384static void __tg3_set_mac_addr(struct tg3 *tp)
5385{
5386 u32 addr_high, addr_low;
5387 int i;
5388
5389 addr_high = ((tp->dev->dev_addr[0] << 8) |
5390 tp->dev->dev_addr[1]);
5391 addr_low = ((tp->dev->dev_addr[2] << 24) |
5392 (tp->dev->dev_addr[3] << 16) |
5393 (tp->dev->dev_addr[4] << 8) |
5394 (tp->dev->dev_addr[5] << 0));
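	/* For example, a MAC address of 00:11:22:33:44:55 packs into
	 * addr_high = 0x00000011 and addr_low = 0x22334455, i.e. the
	 * address bytes are written most-significant byte first.
	 */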
5395 for (i = 0; i < 4; i++) {
5396 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5397 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5398 }
5399
5400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5401 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5402 for (i = 0; i < 12; i++) {
5403 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5404 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5405 }
5406 }
5407
5408 addr_high = (tp->dev->dev_addr[0] +
5409 tp->dev->dev_addr[1] +
5410 tp->dev->dev_addr[2] +
5411 tp->dev->dev_addr[3] +
5412 tp->dev->dev_addr[4] +
5413 tp->dev->dev_addr[5]) &
5414 TX_BACKOFF_SEED_MASK;
5415 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5416}
5417
5418static int tg3_set_mac_addr(struct net_device *dev, void *p)
5419{
5420 struct tg3 *tp = netdev_priv(dev);
5421 struct sockaddr *addr = p;
5422
5423 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5424
David S. Millerf47c11e2005-06-24 20:18:35 -07005425 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426 __tg3_set_mac_addr(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005427 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428
5429 return 0;
5430}
5431
5432/* tp->lock is held. */
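/* Program one TG3_BDINFO block in NIC SRAM: the host (DMA) address of a
 * ring, its length/flags word and, on pre-5705 chips, the NIC-memory
 * address of the descriptors.  tg3_reset_hw() below uses this for the
 * send ring and the receive return ring.
 */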
5433static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5434 dma_addr_t mapping, u32 maxlen_flags,
5435 u32 nic_addr)
5436{
5437 tg3_write_mem(tp,
5438 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5439 ((u64) mapping >> 32));
5440 tg3_write_mem(tp,
5441 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5442 ((u64) mapping & 0xffffffff));
5443 tg3_write_mem(tp,
5444 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5445 maxlen_flags);
5446
5447 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5448 tg3_write_mem(tp,
5449 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5450 nic_addr);
5451}
5452
5453static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07005454static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07005455{
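	/* Mirror the ethtool_coalesce parameters into the host coalescing
	 * engine registers.  The IRQ-time tick registers only exist on
	 * pre-5705 chips, and the statistics-block tick is forced to zero
	 * while the link is down (presumably to quiesce periodic stats
	 * updates until carrier returns).
	 */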
5456 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5457 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5458 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5459 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5460 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5461 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5462 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5463 }
5464 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5465 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5466 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5467 u32 val = ec->stats_block_coalesce_usecs;
5468
5469 if (!netif_carrier_ok(tp->dev))
5470 val = 0;
5471
5472 tw32(HOSTCC_STAT_COAL_TICKS, val);
5473 }
5474}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005475
5476/* tp->lock is held. */
5477static int tg3_reset_hw(struct tg3 *tp)
5478{
5479 u32 val, rdmac_mode;
5480 int i, err, limit;
5481
5482 tg3_disable_ints(tp);
5483
5484 tg3_stop_fw(tp);
5485
5486 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5487
5488 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07005489 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005490 }
5491
5492 err = tg3_chip_reset(tp);
5493 if (err)
5494 return err;
5495
5496 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5497
5498 /* This works around an issue with Athlon chipsets on
5499 * B3 tigon3 silicon. This bit has no effect on any
5500 * other revision. But do not set this on PCI Express
5501 * chips.
5502 */
5503 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5504 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5505 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5506
5507 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5508 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5509 val = tr32(TG3PCI_PCISTATE);
5510 val |= PCISTATE_RETRY_SAME_DMA;
5511 tw32(TG3PCI_PCISTATE, val);
5512 }
5513
5514 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5515 /* Enable some hw fixes. */
5516 val = tr32(TG3PCI_MSI_DATA);
5517 val |= (1 << 26) | (1 << 28) | (1 << 29);
5518 tw32(TG3PCI_MSI_DATA, val);
5519 }
5520
5521 /* Descriptor ring init may make accesses to the
5522 * NIC SRAM area to setup the TX descriptors, so we
5523 * can only do this after the hardware has been
5524 * successfully reset.
5525 */
5526 tg3_init_rings(tp);
5527
5528 /* This value is determined during the probe time DMA
5529 * engine test, tg3_test_dma.
5530 */
5531 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5532
5533 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5534 GRC_MODE_4X_NIC_SEND_RINGS |
5535 GRC_MODE_NO_TX_PHDR_CSUM |
5536 GRC_MODE_NO_RX_PHDR_CSUM);
5537 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5538 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5539 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5540 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5541 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5542
5543 tw32(GRC_MODE,
5544 tp->grc_mode |
5545 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5546
5547	/* Set up the timer prescaler register. The clock is always 66 MHz. */
5548 val = tr32(GRC_MISC_CFG);
5549 val &= ~0xff;
5550 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
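	/* A prescaler value of 65 appears to select divide-by-66, turning
	 * the 66 MHz core clock into a 1 MHz (1 usec) tick, presumably the
	 * time base for the usec-denominated coalescing values programmed
	 * via __tg3_set_coalesce() below.
	 */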
5551 tw32(GRC_MISC_CFG, val);
5552
5553 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07005554 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005555 /* Do nothing. */
5556 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5557 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5559 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5560 else
5561 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5562 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5563 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5564 }
5565#if TG3_TSO_SUPPORT != 0
5566 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5567 int fw_len;
5568
5569 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5570 TG3_TSO5_FW_RODATA_LEN +
5571 TG3_TSO5_FW_DATA_LEN +
5572 TG3_TSO5_FW_SBSS_LEN +
5573 TG3_TSO5_FW_BSS_LEN);
5574 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
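		/* Round the firmware footprint up to a 128-byte boundary
		 * before carving it out of the front of the 5705 mbuf pool.
		 */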
5575 tw32(BUFMGR_MB_POOL_ADDR,
5576 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5577 tw32(BUFMGR_MB_POOL_SIZE,
5578 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5579 }
5580#endif
5581
Michael Chan0f893dc2005-07-25 12:30:38 -07005582 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005583 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5584 tp->bufmgr_config.mbuf_read_dma_low_water);
5585 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5586 tp->bufmgr_config.mbuf_mac_rx_low_water);
5587 tw32(BUFMGR_MB_HIGH_WATER,
5588 tp->bufmgr_config.mbuf_high_water);
5589 } else {
5590 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5591 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5592 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5593 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5594 tw32(BUFMGR_MB_HIGH_WATER,
5595 tp->bufmgr_config.mbuf_high_water_jumbo);
5596 }
5597 tw32(BUFMGR_DMA_LOW_WATER,
5598 tp->bufmgr_config.dma_low_water);
5599 tw32(BUFMGR_DMA_HIGH_WATER,
5600 tp->bufmgr_config.dma_high_water);
5601
5602 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5603 for (i = 0; i < 2000; i++) {
5604 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5605 break;
5606 udelay(10);
5607 }
5608 if (i >= 2000) {
5609 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5610 tp->dev->name);
5611 return -ENODEV;
5612 }
5613
5614 /* Setup replenish threshold. */
5615 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5616
5617 /* Initialize TG3_BDINFO's at:
5618 * RCVDBDI_STD_BD: standard eth size rx ring
5619 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5620 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5621 *
5622 * like so:
5623 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5624 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5625 * ring attribute flags
5626 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5627 *
5628 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5629 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5630 *
5631 * The size of each ring is fixed in the firmware, but the location is
5632 * configurable.
5633 */
5634 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5635 ((u64) tp->rx_std_mapping >> 32));
5636 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5637 ((u64) tp->rx_std_mapping & 0xffffffff));
5638 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5639 NIC_SRAM_RX_BUFFER_DESC);
5640
5641 /* Don't even try to program the JUMBO/MINI buffer descriptor
5642 * configs on 5705.
5643 */
5644 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5645 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5646 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5647 } else {
5648 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5649 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5650
5651 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5652 BDINFO_FLAGS_DISABLED);
5653
5654 /* Setup replenish threshold. */
5655 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5656
Michael Chan0f893dc2005-07-25 12:30:38 -07005657 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005658 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5659 ((u64) tp->rx_jumbo_mapping >> 32));
5660 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5661 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5662 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5663 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5664 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5665 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5666 } else {
5667 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5668 BDINFO_FLAGS_DISABLED);
5669 }
5670
5671 }
5672
5673 /* There is only one send ring on 5705/5750, no need to explicitly
5674 * disable the others.
5675 */
5676 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5677 /* Clear out send RCB ring in SRAM. */
5678 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5679 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5680 BDINFO_FLAGS_DISABLED);
5681 }
5682
5683 tp->tx_prod = 0;
5684 tp->tx_cons = 0;
5685 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5686 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5687
5688 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5689 tp->tx_desc_mapping,
5690 (TG3_TX_RING_SIZE <<
5691 BDINFO_FLAGS_MAXLEN_SHIFT),
5692 NIC_SRAM_TX_BUFFER_DESC);
5693
5694 /* There is only one receive return ring on 5705/5750, no need
5695 * to explicitly disable the others.
5696 */
5697 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5698 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5699 i += TG3_BDINFO_SIZE) {
5700 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5701 BDINFO_FLAGS_DISABLED);
5702 }
5703 }
5704
5705 tp->rx_rcb_ptr = 0;
5706 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5707
5708 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5709 tp->rx_rcb_mapping,
5710 (TG3_RX_RCB_RING_SIZE(tp) <<
5711 BDINFO_FLAGS_MAXLEN_SHIFT),
5712 0);
5713
5714 tp->rx_std_ptr = tp->rx_pending;
5715 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5716 tp->rx_std_ptr);
5717
Michael Chan0f893dc2005-07-25 12:30:38 -07005718 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07005719 tp->rx_jumbo_pending : 0;
5720 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5721 tp->rx_jumbo_ptr);
5722
5723 /* Initialize MAC address and backoff seed. */
5724 __tg3_set_mac_addr(tp);
5725
5726 /* MTU + ethernet header + FCS + optional VLAN tag */
5727 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
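	/* ETH_HLEN (14) covers the Ethernet header; the extra 8 bytes are
	 * the 4-byte FCS plus a 4-byte 802.1Q VLAN tag, matching the
	 * comment above.
	 */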
5728
5729 /* The slot time is changed by tg3_setup_phy if we
5730 * run at gigabit with half duplex.
5731 */
5732 tw32(MAC_TX_LENGTHS,
5733 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5734 (6 << TX_LENGTHS_IPG_SHIFT) |
5735 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5736
5737 /* Receive rules. */
5738 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5739 tw32(RCVLPC_CONFIG, 0x0181);
5740
5741 /* Calculate RDMAC_MODE setting early, we need it to determine
5742 * the RCVLPC_STATE_ENABLE mask.
5743 */
5744 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5745 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5746 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5747 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5748 RDMAC_MODE_LNGREAD_ENAB);
5749 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5750 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
Michael Chan85e94ce2005-04-21 17:05:28 -07005751
5752 /* If statement applies to 5705 and 5750 PCI devices only */
5753 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5754 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5755 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005756 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5757 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5758 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5759 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5760 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5761 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5762 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5763 }
5764 }
5765
Michael Chan85e94ce2005-04-21 17:05:28 -07005766 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5767 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5768
Linus Torvalds1da177e2005-04-16 15:20:36 -07005769#if TG3_TSO_SUPPORT != 0
5770 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5771 rdmac_mode |= (1 << 27);
5772#endif
5773
5774 /* Receive/send statistics. */
5775 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5776 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5777 val = tr32(RCVLPC_STATS_ENABLE);
5778 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5779 tw32(RCVLPC_STATS_ENABLE, val);
5780 } else {
5781 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5782 }
5783 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5784 tw32(SNDDATAI_STATSENAB, 0xffffff);
5785 tw32(SNDDATAI_STATSCTRL,
5786 (SNDDATAI_SCTRL_ENABLE |
5787 SNDDATAI_SCTRL_FASTUPD));
5788
5789 /* Setup host coalescing engine. */
5790 tw32(HOSTCC_MODE, 0);
5791 for (i = 0; i < 2000; i++) {
5792 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5793 break;
5794 udelay(10);
5795 }
5796
Michael Chand244c892005-07-05 14:42:33 -07005797 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005798
5799 /* set status block DMA address */
5800 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5801 ((u64) tp->status_mapping >> 32));
5802 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5803 ((u64) tp->status_mapping & 0xffffffff));
5804
5805 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5806 /* Status/statistics block address. See tg3_timer,
5807 * the tg3_periodic_fetch_stats call there, and
5808 * tg3_get_stats to see how this works for 5705/5750 chips.
5809 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005810 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5811 ((u64) tp->stats_mapping >> 32));
5812 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5813 ((u64) tp->stats_mapping & 0xffffffff));
5814 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5815 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5816 }
5817
5818 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5819
5820 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5821 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5822 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5823 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5824
5825 /* Clear statistics/status block in chip, and status block in ram. */
5826 for (i = NIC_SRAM_STATS_BLK;
5827 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5828 i += sizeof(u32)) {
5829 tg3_write_mem(tp, i, 0);
5830 udelay(40);
5831 }
5832 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5833
5834 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5835 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5836 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5837 udelay(40);
5838
Michael Chan314fba32005-04-21 17:07:04 -07005839 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5840 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5841 * register to preserve the GPIO settings for LOMs. The GPIOs,
5842 * whether used as inputs or outputs, are set by boot code after
5843 * reset.
5844 */
5845 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5846 u32 gpio_mask;
5847
5848 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5849 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07005850
5851 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5852 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5853 GRC_LCLCTRL_GPIO_OUTPUT3;
5854
Michael Chan314fba32005-04-21 17:07:04 -07005855 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5856
5857 /* GPIO1 must be driven high for eeprom write protect */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005858 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5859 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07005860 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005861 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5862 udelay(100);
5863
Michael Chan09ee9292005-08-09 20:17:00 -07005864 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07005865 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005866
5867 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5868 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5869 udelay(40);
5870 }
5871
5872 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5873 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5874 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5875 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5876 WDMAC_MODE_LNGREAD_ENAB);
5877
Michael Chan85e94ce2005-04-21 17:05:28 -07005878 /* If statement applies to 5705 and 5750 PCI devices only */
5879 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5880 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5881 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005882		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5883 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5884 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5885 /* nothing */
5886 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5887 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5888 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5889 val |= WDMAC_MODE_RX_ACCEL;
5890 }
5891 }
5892
5893 tw32_f(WDMAC_MODE, val);
5894 udelay(40);
5895
5896 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5897 val = tr32(TG3PCI_X_CAPS);
5898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5899 val &= ~PCIX_CAPS_BURST_MASK;
5900 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5901 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5902 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5903 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5904 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5905 val |= (tp->split_mode_max_reqs <<
5906 PCIX_CAPS_SPLIT_SHIFT);
5907 }
5908 tw32(TG3PCI_X_CAPS, val);
5909 }
5910
5911 tw32_f(RDMAC_MODE, rdmac_mode);
5912 udelay(40);
5913
5914 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5915 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5916 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5917 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5918 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5919 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5920 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5921 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5922#if TG3_TSO_SUPPORT != 0
5923 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5924 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5925#endif
5926 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5927 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5928
5929 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5930 err = tg3_load_5701_a0_firmware_fix(tp);
5931 if (err)
5932 return err;
5933 }
5934
5935#if TG3_TSO_SUPPORT != 0
5936 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5937 err = tg3_load_tso_firmware(tp);
5938 if (err)
5939 return err;
5940 }
5941#endif
5942
5943 tp->tx_mode = TX_MODE_ENABLE;
5944 tw32_f(MAC_TX_MODE, tp->tx_mode);
5945 udelay(100);
5946
5947 tp->rx_mode = RX_MODE_ENABLE;
5948 tw32_f(MAC_RX_MODE, tp->rx_mode);
5949 udelay(10);
5950
5951 if (tp->link_config.phy_is_low_power) {
5952 tp->link_config.phy_is_low_power = 0;
5953 tp->link_config.speed = tp->link_config.orig_speed;
5954 tp->link_config.duplex = tp->link_config.orig_duplex;
5955 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5956 }
5957
5958 tp->mi_mode = MAC_MI_MODE_BASE;
5959 tw32_f(MAC_MI_MODE, tp->mi_mode);
5960 udelay(80);
5961
5962 tw32(MAC_LED_CTRL, tp->led_ctrl);
5963
5964 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chan4c987482005-09-05 17:52:38 -07005965 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005966 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5967 udelay(10);
5968 }
5969 tw32_f(MAC_RX_MODE, tp->rx_mode);
5970 udelay(10);
5971
5972 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5973 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5974 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5975 /* Set drive transmission level to 1.2V */
5976 /* only if the signal pre-emphasis bit is not set */
5977 val = tr32(MAC_SERDES_CFG);
5978 val &= 0xfffff000;
5979 val |= 0x880;
5980 tw32(MAC_SERDES_CFG, val);
5981 }
5982 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5983 tw32(MAC_SERDES_CFG, 0x616000);
5984 }
5985
5986 /* Prevent chip from dropping frames when flow control
5987 * is enabled.
5988 */
5989 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5990
5991 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5992 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5993 /* Use hardware link auto-negotiation */
5994 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5995 }
5996
5997 err = tg3_setup_phy(tp, 1);
5998 if (err)
5999 return err;
6000
6001 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6002 u32 tmp;
6003
6004 /* Clear CRC stats. */
6005 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6006 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6007 tg3_readphy(tp, 0x14, &tmp);
6008 }
6009 }
6010
6011 __tg3_set_rx_mode(tp->dev);
6012
6013 /* Initialize receive rules. */
6014 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6015 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6016 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6017 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6018
Michael Chan4cf78e42005-07-25 12:29:19 -07006019 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6020 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006021 limit = 8;
6022 else
6023 limit = 16;
6024 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6025 limit -= 4;
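	/* The cases below fall through on purpose: entering at 'limit'
	 * clears receive rules limit-1 down through 4.  Rules 0 and 1 are
	 * programmed above, and the clears for rules 2 and 3 are
	 * intentionally commented out.
	 */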
6026 switch (limit) {
6027 case 16:
6028 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6029 case 15:
6030 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6031 case 14:
6032 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6033 case 13:
6034 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6035 case 12:
6036 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6037 case 11:
6038 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6039 case 10:
6040 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6041 case 9:
6042 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6043 case 8:
6044 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6045 case 7:
6046 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6047 case 6:
6048 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6049 case 5:
6050 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6051 case 4:
6052 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6053 case 3:
6054 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6055 case 2:
6056 case 1:
6057
6058 default:
6059 break;
6060 };
6061
6062 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6063
Linus Torvalds1da177e2005-04-16 15:20:36 -07006064 return 0;
6065}
6066
6067/* Called at device open time to get the chip ready for
6068 * packet processing. Invoked with tp->lock held.
6069 */
6070static int tg3_init_hw(struct tg3 *tp)
6071{
6072 int err;
6073
6074 /* Force the chip into D0. */
6075 err = tg3_set_power_state(tp, 0);
6076 if (err)
6077 goto out;
6078
6079 tg3_switch_clocks(tp);
6080
6081 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6082
6083 err = tg3_reset_hw(tp);
6084
6085out:
6086 return err;
6087}
6088
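/* Accumulate a 32-bit hardware counter into a 64-bit (high/low) stat.
 * The register value is added to the low word; if the addition wraps
 * (new low < value added), the high word is bumped by one.
 */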
6089#define TG3_STAT_ADD32(PSTAT, REG) \
6090do { u32 __val = tr32(REG); \
6091 (PSTAT)->low += __val; \
6092 if ((PSTAT)->low < __val) \
6093 (PSTAT)->high += 1; \
6094} while (0)
6095
6096static void tg3_periodic_fetch_stats(struct tg3 *tp)
6097{
6098 struct tg3_hw_stats *sp = tp->hw_stats;
6099
6100 if (!netif_carrier_ok(tp->dev))
6101 return;
6102
6103 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6104 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6105 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6106 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6107 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6108 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6109 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6110 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6111 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6112 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6113 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6114 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6115 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6116
6117 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6118 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6119 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6120 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6121 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6122 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6123 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6124 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6125 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6126 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6127 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6128 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6129 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6130 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6131}
6132
6133static void tg3_timer(unsigned long __opaque)
6134{
6135 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006136
David S. Millerf47c11e2005-06-24 20:18:35 -07006137 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006138
David S. Millerfac9b832005-05-18 22:46:34 -07006139 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6140		/* All of this garbage is needed because, when using non-tagged
6141		 * IRQ status, the mailbox/status_block protocol the chip
6142		 * uses with the CPU is race prone.
6143 */
6144 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6145 tw32(GRC_LOCAL_CTRL,
6146 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6147 } else {
6148 tw32(HOSTCC_MODE, tp->coalesce_mode |
6149 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6150 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006151
David S. Millerfac9b832005-05-18 22:46:34 -07006152 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6153 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07006154 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07006155 schedule_work(&tp->reset_task);
6156 return;
6157 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006158 }
6159
Linus Torvalds1da177e2005-04-16 15:20:36 -07006160 /* This part only runs once per second. */
6161 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07006162 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6163 tg3_periodic_fetch_stats(tp);
6164
Linus Torvalds1da177e2005-04-16 15:20:36 -07006165 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6166 u32 mac_stat;
6167 int phy_event;
6168
6169 mac_stat = tr32(MAC_STATUS);
6170
6171 phy_event = 0;
6172 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6173 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6174 phy_event = 1;
6175 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6176 phy_event = 1;
6177
6178 if (phy_event)
6179 tg3_setup_phy(tp, 0);
6180 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6181 u32 mac_stat = tr32(MAC_STATUS);
6182 int need_setup = 0;
6183
6184 if (netif_carrier_ok(tp->dev) &&
6185 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6186 need_setup = 1;
6187 }
6188 if (! netif_carrier_ok(tp->dev) &&
6189 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6190 MAC_STATUS_SIGNAL_DET))) {
6191 need_setup = 1;
6192 }
6193 if (need_setup) {
6194 tw32_f(MAC_MODE,
6195 (tp->mac_mode &
6196 ~MAC_MODE_PORT_MODE_MASK));
6197 udelay(40);
6198 tw32_f(MAC_MODE, tp->mac_mode);
6199 udelay(40);
6200 tg3_setup_phy(tp, 0);
6201 }
Michael Chan747e8f82005-07-25 12:33:22 -07006202 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6203 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204
6205 tp->timer_counter = tp->timer_multiplier;
6206 }
6207
6208 /* Heartbeat is only sent once every 120 seconds. */
6209 if (!--tp->asf_counter) {
6210 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6211 u32 val;
6212
6213 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
6214 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6215 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
6216 val = tr32(GRC_RX_CPU_EVENT);
6217 val |= (1 << 14);
6218 tw32(GRC_RX_CPU_EVENT, val);
6219 }
6220 tp->asf_counter = tp->asf_multiplier;
6221 }
6222
David S. Millerf47c11e2005-06-24 20:18:35 -07006223 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006224
6225 tp->timer.expires = jiffies + tp->timer_offset;
6226 add_timer(&tp->timer);
6227}
6228
Michael Chan79381092005-04-21 17:13:59 -07006229static int tg3_test_interrupt(struct tg3 *tp)
6230{
6231 struct net_device *dev = tp->dev;
6232 int err, i;
6233 u32 int_mbox = 0;
6234
Michael Chand4bc3922005-05-29 14:59:20 -07006235 if (!netif_running(dev))
6236 return -ENODEV;
6237
Michael Chan79381092005-04-21 17:13:59 -07006238 tg3_disable_ints(tp);
6239
6240 free_irq(tp->pdev->irq, dev);
6241
6242 err = request_irq(tp->pdev->irq, tg3_test_isr,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006243 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07006244 if (err)
6245 return err;
6246
6247 tg3_enable_ints(tp);
6248
6249 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6250 HOSTCC_MODE_NOW);
6251
6252 for (i = 0; i < 5; i++) {
Michael Chan09ee9292005-08-09 20:17:00 -07006253 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6254 TG3_64BIT_REG_LOW);
Michael Chan79381092005-04-21 17:13:59 -07006255 if (int_mbox != 0)
6256 break;
6257 msleep(10);
6258 }
6259
6260 tg3_disable_ints(tp);
6261
6262 free_irq(tp->pdev->irq, dev);
6263
6264 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6265 err = request_irq(tp->pdev->irq, tg3_msi,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006266 SA_SAMPLE_RANDOM, dev->name, dev);
David S. Millerfac9b832005-05-18 22:46:34 -07006267 else {
6268 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6269 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6270 fn = tg3_interrupt_tagged;
6271 err = request_irq(tp->pdev->irq, fn,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006272 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
David S. Millerfac9b832005-05-18 22:46:34 -07006273 }
Michael Chan79381092005-04-21 17:13:59 -07006274
6275 if (err)
6276 return err;
6277
6278 if (int_mbox != 0)
6279 return 0;
6280
6281 return -EIO;
6282}
6283
6284/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
6285 * mode is successfully restored.
6286 */
6287static int tg3_test_msi(struct tg3 *tp)
6288{
6289 struct net_device *dev = tp->dev;
6290 int err;
6291 u16 pci_cmd;
6292
6293 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6294 return 0;
6295
6296 /* Turn off SERR reporting in case MSI terminates with Master
6297 * Abort.
6298 */
6299 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6300 pci_write_config_word(tp->pdev, PCI_COMMAND,
6301 pci_cmd & ~PCI_COMMAND_SERR);
6302
6303 err = tg3_test_interrupt(tp);
6304
6305 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6306
6307 if (!err)
6308 return 0;
6309
6310 /* other failures */
6311 if (err != -EIO)
6312 return err;
6313
6314 /* MSI test failed, go back to INTx mode */
6315 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6316 "switching to INTx mode. Please report this failure to "
6317 "the PCI maintainer and include system chipset information.\n",
6318 tp->dev->name);
6319
6320 free_irq(tp->pdev->irq, dev);
6321 pci_disable_msi(tp->pdev);
6322
6323 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6324
David S. Millerfac9b832005-05-18 22:46:34 -07006325 {
6326 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6327 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6328 fn = tg3_interrupt_tagged;
Michael Chan79381092005-04-21 17:13:59 -07006329
David S. Millerfac9b832005-05-18 22:46:34 -07006330 err = request_irq(tp->pdev->irq, fn,
6331 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6332 }
Michael Chan79381092005-04-21 17:13:59 -07006333 if (err)
6334 return err;
6335
6336 /* Need to reset the chip because the MSI cycle may have terminated
6337 * with Master Abort.
6338 */
David S. Millerf47c11e2005-06-24 20:18:35 -07006339 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07006340
Michael Chan944d9802005-05-29 14:57:48 -07006341 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07006342 err = tg3_init_hw(tp);
6343
David S. Millerf47c11e2005-06-24 20:18:35 -07006344 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07006345
6346 if (err)
6347 free_irq(tp->pdev->irq, dev);
6348
6349 return err;
6350}
6351
Linus Torvalds1da177e2005-04-16 15:20:36 -07006352static int tg3_open(struct net_device *dev)
6353{
6354 struct tg3 *tp = netdev_priv(dev);
6355 int err;
6356
David S. Millerf47c11e2005-06-24 20:18:35 -07006357 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006358
6359 tg3_disable_ints(tp);
6360 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6361
David S. Millerf47c11e2005-06-24 20:18:35 -07006362 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363
6364 /* The placement of this call is tied
6365 * to the setup and use of Host TX descriptors.
6366 */
6367 err = tg3_alloc_consistent(tp);
6368 if (err)
6369 return err;
6370
Michael Chan88b06bc2005-04-21 17:13:25 -07006371 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6372 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6373 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
David S. Millerfac9b832005-05-18 22:46:34 -07006374 /* All MSI supporting chips should support tagged
6375 * status. Assert that this is the case.
6376 */
6377 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6378 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6379 "Not using MSI.\n", tp->dev->name);
6380 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07006381 u32 msi_mode;
6382
6383 msi_mode = tr32(MSGINT_MODE);
6384 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6385 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6386 }
6387 }
6388 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6389 err = request_irq(tp->pdev->irq, tg3_msi,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006390 SA_SAMPLE_RANDOM, dev->name, dev);
David S. Millerfac9b832005-05-18 22:46:34 -07006391 else {
6392 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6393 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6394 fn = tg3_interrupt_tagged;
6395
6396 err = request_irq(tp->pdev->irq, fn,
David S. Millerf4d0ee92005-04-28 11:33:20 -07006397 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
David S. Millerfac9b832005-05-18 22:46:34 -07006398 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006399
6400 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07006401 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6402 pci_disable_msi(tp->pdev);
6403 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6404 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006405 tg3_free_consistent(tp);
6406 return err;
6407 }
6408
David S. Millerf47c11e2005-06-24 20:18:35 -07006409 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006410
6411 err = tg3_init_hw(tp);
6412 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07006413 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006414 tg3_free_rings(tp);
6415 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07006416 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6417 tp->timer_offset = HZ;
6418 else
6419 tp->timer_offset = HZ / 10;
6420
6421 BUG_ON(tp->timer_offset > HZ);
6422 tp->timer_counter = tp->timer_multiplier =
6423 (HZ / tp->timer_offset);
6424 tp->asf_counter = tp->asf_multiplier =
6425 ((HZ / tp->timer_offset) * 120);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006426
6427 init_timer(&tp->timer);
6428 tp->timer.expires = jiffies + tp->timer_offset;
6429 tp->timer.data = (unsigned long) tp;
6430 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006431 }
6432
David S. Millerf47c11e2005-06-24 20:18:35 -07006433 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006434
6435 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07006436 free_irq(tp->pdev->irq, dev);
6437 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6438 pci_disable_msi(tp->pdev);
6439 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006441 tg3_free_consistent(tp);
6442 return err;
6443 }
6444
Michael Chan79381092005-04-21 17:13:59 -07006445 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6446 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07006447
Michael Chan79381092005-04-21 17:13:59 -07006448 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07006449 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07006450
6451 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6452 pci_disable_msi(tp->pdev);
6453 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6454 }
Michael Chan944d9802005-05-29 14:57:48 -07006455 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07006456 tg3_free_rings(tp);
6457 tg3_free_consistent(tp);
6458
David S. Millerf47c11e2005-06-24 20:18:35 -07006459 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07006460
6461 return err;
6462 }
6463 }
6464
David S. Millerf47c11e2005-06-24 20:18:35 -07006465 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006466
Michael Chan79381092005-04-21 17:13:59 -07006467 add_timer(&tp->timer);
6468 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006469 tg3_enable_ints(tp);
6470
David S. Millerf47c11e2005-06-24 20:18:35 -07006471 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006472
6473 netif_start_queue(dev);
6474
6475 return 0;
6476}
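
/* Illustrative arithmetic for the timer setup in tg3_open() above (this is
 * commentary, not driver code; it just expands the expressions used there):
 *
 *   tagged status:    timer_offset = HZ      -> tg3_timer() runs once a second
 *                     timer_multiplier = HZ / HZ        = 1
 *                     asf_multiplier   = 1 * 120        = 120 ticks ~ 120 sec
 *
 *   untagged status:  timer_offset = HZ / 10 -> tg3_timer() runs 10 times a second
 *                     timer_multiplier = HZ / (HZ / 10) = 10
 *                     asf_multiplier   = 10 * 120       = 1200 ticks ~ 120 sec
 *
 * Either way the ASF heartbeat in tg3_timer() fires roughly every 120 seconds,
 * which matches the "Heartbeat is only sent once every 120 seconds" comment
 * in that function.
 */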
6477
6478#if 0
6479/*static*/ void tg3_dump_state(struct tg3 *tp)
6480{
6481 u32 val32, val32_2, val32_3, val32_4, val32_5;
6482 u16 val16;
6483 int i;
6484
6485 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6486 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6487 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6488 val16, val32);
6489
6490 /* MAC block */
6491 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6492 tr32(MAC_MODE), tr32(MAC_STATUS));
6493 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6494 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6495 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6496 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6497 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6498 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6499
6500 /* Send data initiator control block */
6501 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6502 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6503 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6504 tr32(SNDDATAI_STATSCTRL));
6505
6506 /* Send data completion control block */
6507 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6508
6509 /* Send BD ring selector block */
6510 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6511 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6512
6513 /* Send BD initiator control block */
6514 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6515 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6516
6517 /* Send BD completion control block */
6518 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6519
6520 /* Receive list placement control block */
6521 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6522 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6523 printk(" RCVLPC_STATSCTRL[%08x]\n",
6524 tr32(RCVLPC_STATSCTRL));
6525
6526 /* Receive data and receive BD initiator control block */
6527 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6528 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6529
6530 /* Receive data completion control block */
6531 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6532 tr32(RCVDCC_MODE));
6533
6534 /* Receive BD initiator control block */
6535 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6536 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6537
6538 /* Receive BD completion control block */
6539 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6540 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6541
6542 /* Receive list selector control block */
6543 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6544 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6545
6546 /* Mbuf cluster free block */
6547 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6548 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6549
6550 /* Host coalescing control block */
6551 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6552 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6553 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6554 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6555 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6556 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6557 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6558 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6559 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6560 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6561 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6562 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6563
6564 /* Memory arbiter control block */
6565 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6566 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6567
6568 /* Buffer manager control block */
6569 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6570 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6571 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6572 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6573 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6574 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6575 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6576 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6577
6578 /* Read DMA control block */
6579 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6580 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6581
6582 /* Write DMA control block */
6583 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6584 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6585
6586 /* DMA completion block */
6587 printk("DEBUG: DMAC_MODE[%08x]\n",
6588 tr32(DMAC_MODE));
6589
6590 /* GRC block */
6591 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6592 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6593 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6594 tr32(GRC_LOCAL_CTRL));
6595
6596 /* TG3_BDINFOs */
6597 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6598 tr32(RCVDBDI_JUMBO_BD + 0x0),
6599 tr32(RCVDBDI_JUMBO_BD + 0x4),
6600 tr32(RCVDBDI_JUMBO_BD + 0x8),
6601 tr32(RCVDBDI_JUMBO_BD + 0xc));
6602 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6603 tr32(RCVDBDI_STD_BD + 0x0),
6604 tr32(RCVDBDI_STD_BD + 0x4),
6605 tr32(RCVDBDI_STD_BD + 0x8),
6606 tr32(RCVDBDI_STD_BD + 0xc));
6607 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6608 tr32(RCVDBDI_MINI_BD + 0x0),
6609 tr32(RCVDBDI_MINI_BD + 0x4),
6610 tr32(RCVDBDI_MINI_BD + 0x8),
6611 tr32(RCVDBDI_MINI_BD + 0xc));
6612
6613 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6614 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6615 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6616 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6617 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6618 val32, val32_2, val32_3, val32_4);
6619
6620 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6621 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6622 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6623 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6624 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6625 val32, val32_2, val32_3, val32_4);
6626
6627 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6628 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6629 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6630 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6631 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6632 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6633 val32, val32_2, val32_3, val32_4, val32_5);
6634
6635 /* SW status block */
6636 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6637 tp->hw_status->status,
6638 tp->hw_status->status_tag,
6639 tp->hw_status->rx_jumbo_consumer,
6640 tp->hw_status->rx_consumer,
6641 tp->hw_status->rx_mini_consumer,
6642 tp->hw_status->idx[0].rx_producer,
6643 tp->hw_status->idx[0].tx_consumer);
6644
6645 /* SW statistics block */
6646 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6647 ((u32 *)tp->hw_stats)[0],
6648 ((u32 *)tp->hw_stats)[1],
6649 ((u32 *)tp->hw_stats)[2],
6650 ((u32 *)tp->hw_stats)[3]);
6651
6652 /* Mailboxes */
6653 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07006654 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6655 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6656 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6657 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006658
6659 /* NIC side send descriptors. */
6660 for (i = 0; i < 6; i++) {
6661 unsigned long txd;
6662
6663 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6664 + (i * sizeof(struct tg3_tx_buffer_desc));
6665 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6666 i,
6667 readl(txd + 0x0), readl(txd + 0x4),
6668 readl(txd + 0x8), readl(txd + 0xc));
6669 }
6670
6671 /* NIC side RX descriptors. */
6672 for (i = 0; i < 6; i++) {
6673 unsigned long rxd;
6674
6675 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6676 + (i * sizeof(struct tg3_rx_buffer_desc));
6677 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6678 i,
6679 readl(rxd + 0x0), readl(rxd + 0x4),
6680 readl(rxd + 0x8), readl(rxd + 0xc));
6681 rxd += (4 * sizeof(u32));
6682 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6683 i,
6684 readl(rxd + 0x0), readl(rxd + 0x4),
6685 readl(rxd + 0x8), readl(rxd + 0xc));
6686 }
6687
6688 for (i = 0; i < 6; i++) {
6689 unsigned long rxd;
6690
6691 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6692 + (i * sizeof(struct tg3_rx_buffer_desc));
6693 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6694 i,
6695 readl(rxd + 0x0), readl(rxd + 0x4),
6696 readl(rxd + 0x8), readl(rxd + 0xc));
6697 rxd += (4 * sizeof(u32));
6698 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6699 i,
6700 readl(rxd + 0x0), readl(rxd + 0x4),
6701 readl(rxd + 0x8), readl(rxd + 0xc));
6702 }
6703}
6704#endif
6705
6706static struct net_device_stats *tg3_get_stats(struct net_device *);
6707static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6708
6709static int tg3_close(struct net_device *dev)
6710{
6711 struct tg3 *tp = netdev_priv(dev);
6712
6713 netif_stop_queue(dev);
6714
6715 del_timer_sync(&tp->timer);
6716
David S. Millerf47c11e2005-06-24 20:18:35 -07006717 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006718#if 0
6719 tg3_dump_state(tp);
6720#endif
6721
6722 tg3_disable_ints(tp);
6723
Michael Chan944d9802005-05-29 14:57:48 -07006724 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006725 tg3_free_rings(tp);
6726 tp->tg3_flags &=
6727 ~(TG3_FLAG_INIT_COMPLETE |
6728 TG3_FLAG_GOT_SERDES_FLOWCTL);
6729 netif_carrier_off(tp->dev);
6730
David S. Millerf47c11e2005-06-24 20:18:35 -07006731 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006732
Michael Chan88b06bc2005-04-21 17:13:25 -07006733 free_irq(tp->pdev->irq, dev);
6734 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6735 pci_disable_msi(tp->pdev);
6736 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6737 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006738
6739 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6740 sizeof(tp->net_stats_prev));
6741 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6742 sizeof(tp->estats_prev));
6743
6744 tg3_free_consistent(tp);
6745
6746 return 0;
6747}
6748
6749static inline unsigned long get_stat64(tg3_stat64_t *val)
6750{
6751 unsigned long ret;
6752
6753#if (BITS_PER_LONG == 32)
6754 ret = val->low;
6755#else
6756 ret = ((u64)val->high << 32) | ((u64)val->low);
6757#endif
6758 return ret;
6759}
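
/* Example (illustrative only): for a hardware counter with high = 0x1 and
 * low = 0x2, get_stat64() returns 0x100000002 on a 64-bit kernel, but only
 * 0x2 on a 32-bit kernel, where the upper half of the counter is dropped.
 */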
6760
6761static unsigned long calc_crc_errors(struct tg3 *tp)
6762{
6763 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6764
6765 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6766 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006768 u32 val;
6769
David S. Millerf47c11e2005-06-24 20:18:35 -07006770 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006771 if (!tg3_readphy(tp, 0x1e, &val)) {
6772 tg3_writephy(tp, 0x1e, val | 0x8000);
6773 tg3_readphy(tp, 0x14, &val);
6774 } else
6775 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07006776 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006777
6778 tp->phy_crc_errors += val;
6779
6780 return tp->phy_crc_errors;
6781 }
6782
6783 return get_stat64(&hw_stats->rx_fcs_errors);
6784}
6785
6786#define ESTAT_ADD(member) \
6787 estats->member = old_estats->member + \
6788 get_stat64(&hw_stats->member)
6789
6790static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6791{
6792 struct tg3_ethtool_stats *estats = &tp->estats;
6793 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6794 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6795
6796 if (!hw_stats)
6797 return old_estats;
6798
6799 ESTAT_ADD(rx_octets);
6800 ESTAT_ADD(rx_fragments);
6801 ESTAT_ADD(rx_ucast_packets);
6802 ESTAT_ADD(rx_mcast_packets);
6803 ESTAT_ADD(rx_bcast_packets);
6804 ESTAT_ADD(rx_fcs_errors);
6805 ESTAT_ADD(rx_align_errors);
6806 ESTAT_ADD(rx_xon_pause_rcvd);
6807 ESTAT_ADD(rx_xoff_pause_rcvd);
6808 ESTAT_ADD(rx_mac_ctrl_rcvd);
6809 ESTAT_ADD(rx_xoff_entered);
6810 ESTAT_ADD(rx_frame_too_long_errors);
6811 ESTAT_ADD(rx_jabbers);
6812 ESTAT_ADD(rx_undersize_packets);
6813 ESTAT_ADD(rx_in_length_errors);
6814 ESTAT_ADD(rx_out_length_errors);
6815 ESTAT_ADD(rx_64_or_less_octet_packets);
6816 ESTAT_ADD(rx_65_to_127_octet_packets);
6817 ESTAT_ADD(rx_128_to_255_octet_packets);
6818 ESTAT_ADD(rx_256_to_511_octet_packets);
6819 ESTAT_ADD(rx_512_to_1023_octet_packets);
6820 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6821 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6822 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6823 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6824 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6825
6826 ESTAT_ADD(tx_octets);
6827 ESTAT_ADD(tx_collisions);
6828 ESTAT_ADD(tx_xon_sent);
6829 ESTAT_ADD(tx_xoff_sent);
6830 ESTAT_ADD(tx_flow_control);
6831 ESTAT_ADD(tx_mac_errors);
6832 ESTAT_ADD(tx_single_collisions);
6833 ESTAT_ADD(tx_mult_collisions);
6834 ESTAT_ADD(tx_deferred);
6835 ESTAT_ADD(tx_excessive_collisions);
6836 ESTAT_ADD(tx_late_collisions);
6837 ESTAT_ADD(tx_collide_2times);
6838 ESTAT_ADD(tx_collide_3times);
6839 ESTAT_ADD(tx_collide_4times);
6840 ESTAT_ADD(tx_collide_5times);
6841 ESTAT_ADD(tx_collide_6times);
6842 ESTAT_ADD(tx_collide_7times);
6843 ESTAT_ADD(tx_collide_8times);
6844 ESTAT_ADD(tx_collide_9times);
6845 ESTAT_ADD(tx_collide_10times);
6846 ESTAT_ADD(tx_collide_11times);
6847 ESTAT_ADD(tx_collide_12times);
6848 ESTAT_ADD(tx_collide_13times);
6849 ESTAT_ADD(tx_collide_14times);
6850 ESTAT_ADD(tx_collide_15times);
6851 ESTAT_ADD(tx_ucast_packets);
6852 ESTAT_ADD(tx_mcast_packets);
6853 ESTAT_ADD(tx_bcast_packets);
6854 ESTAT_ADD(tx_carrier_sense_errors);
6855 ESTAT_ADD(tx_discards);
6856 ESTAT_ADD(tx_errors);
6857
6858 ESTAT_ADD(dma_writeq_full);
6859 ESTAT_ADD(dma_write_prioq_full);
6860 ESTAT_ADD(rxbds_empty);
6861 ESTAT_ADD(rx_discards);
6862 ESTAT_ADD(rx_errors);
6863 ESTAT_ADD(rx_threshold_hit);
6864
6865 ESTAT_ADD(dma_readq_full);
6866 ESTAT_ADD(dma_read_prioq_full);
6867 ESTAT_ADD(tx_comp_queue_full);
6868
6869 ESTAT_ADD(ring_set_send_prod_index);
6870 ESTAT_ADD(ring_status_update);
6871 ESTAT_ADD(nic_irqs);
6872 ESTAT_ADD(nic_avoided_irqs);
6873 ESTAT_ADD(nic_tx_threshold_hit);
6874
6875 return estats;
6876}
6877
6878static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6879{
6880 struct tg3 *tp = netdev_priv(dev);
6881 struct net_device_stats *stats = &tp->net_stats;
6882 struct net_device_stats *old_stats = &tp->net_stats_prev;
6883 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6884
6885 if (!hw_stats)
6886 return old_stats;
6887
6888 stats->rx_packets = old_stats->rx_packets +
6889 get_stat64(&hw_stats->rx_ucast_packets) +
6890 get_stat64(&hw_stats->rx_mcast_packets) +
6891 get_stat64(&hw_stats->rx_bcast_packets);
6892
6893 stats->tx_packets = old_stats->tx_packets +
6894 get_stat64(&hw_stats->tx_ucast_packets) +
6895 get_stat64(&hw_stats->tx_mcast_packets) +
6896 get_stat64(&hw_stats->tx_bcast_packets);
6897
6898 stats->rx_bytes = old_stats->rx_bytes +
6899 get_stat64(&hw_stats->rx_octets);
6900 stats->tx_bytes = old_stats->tx_bytes +
6901 get_stat64(&hw_stats->tx_octets);
6902
6903 stats->rx_errors = old_stats->rx_errors +
6904 get_stat64(&hw_stats->rx_errors) +
6905 get_stat64(&hw_stats->rx_discards);
6906 stats->tx_errors = old_stats->tx_errors +
6907 get_stat64(&hw_stats->tx_errors) +
6908 get_stat64(&hw_stats->tx_mac_errors) +
6909 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6910 get_stat64(&hw_stats->tx_discards);
6911
6912 stats->multicast = old_stats->multicast +
6913 get_stat64(&hw_stats->rx_mcast_packets);
6914 stats->collisions = old_stats->collisions +
6915 get_stat64(&hw_stats->tx_collisions);
6916
6917 stats->rx_length_errors = old_stats->rx_length_errors +
6918 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6919 get_stat64(&hw_stats->rx_undersize_packets);
6920
6921 stats->rx_over_errors = old_stats->rx_over_errors +
6922 get_stat64(&hw_stats->rxbds_empty);
6923 stats->rx_frame_errors = old_stats->rx_frame_errors +
6924 get_stat64(&hw_stats->rx_align_errors);
6925 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6926 get_stat64(&hw_stats->tx_discards);
6927 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6928 get_stat64(&hw_stats->tx_carrier_sense_errors);
6929
6930 stats->rx_crc_errors = old_stats->rx_crc_errors +
6931 calc_crc_errors(tp);
6932
6933 return stats;
6934}
6935
6936static inline u32 calc_crc(unsigned char *buf, int len)
6937{
6938 u32 reg;
6939 u32 tmp;
6940 int j, k;
6941
6942 reg = 0xffffffff;
6943
6944 for (j = 0; j < len; j++) {
6945 reg ^= buf[j];
6946
6947 for (k = 0; k < 8; k++) {
6948 tmp = reg & 0x01;
6949
6950 reg >>= 1;
6951
6952 if (tmp) {
6953 reg ^= 0xedb88320;
6954 }
6955 }
6956 }
6957
6958 return ~reg;
6959}
6960
6961static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6962{
6963 /* accept or reject all multicast frames */
6964 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6965 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6966 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6967 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6968}
6969
6970static void __tg3_set_rx_mode(struct net_device *dev)
6971{
6972 struct tg3 *tp = netdev_priv(dev);
6973 u32 rx_mode;
6974
6975 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6976 RX_MODE_KEEP_VLAN_TAG);
6977
6978 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6979 * flag clear.
6980 */
6981#if TG3_VLAN_TAG_USED
6982 if (!tp->vlgrp &&
6983 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6984 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6985#else
6986 /* By definition, VLAN is always disabled in this
6987 * case.
6988 */
6989 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6990 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6991#endif
6992
6993 if (dev->flags & IFF_PROMISC) {
6994 /* Promiscuous mode. */
6995 rx_mode |= RX_MODE_PROMISC;
6996 } else if (dev->flags & IFF_ALLMULTI) {
6997 /* Accept all multicast. */
6998 tg3_set_multi (tp, 1);
6999 } else if (dev->mc_count < 1) {
7000 /* Reject all multicast. */
7001 tg3_set_multi (tp, 0);
7002 } else {
7003 /* Accept one or more multicast(s). */
7004 struct dev_mc_list *mclist;
7005 unsigned int i;
7006 u32 mc_filter[4] = { 0, };
7007 u32 regidx;
7008 u32 bit;
7009 u32 crc;
7010
7011 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7012 i++, mclist = mclist->next) {
7013
7014 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7015 bit = ~crc & 0x7f;
7016 regidx = (bit & 0x60) >> 5;
7017 bit &= 0x1f;
7018 mc_filter[regidx] |= (1 << bit);
7019 }
7020
7021 tw32(MAC_HASH_REG_0, mc_filter[0]);
7022 tw32(MAC_HASH_REG_1, mc_filter[1]);
7023 tw32(MAC_HASH_REG_2, mc_filter[2]);
7024 tw32(MAC_HASH_REG_3, mc_filter[3]);
7025 }
7026
7027 if (rx_mode != tp->rx_mode) {
7028 tp->rx_mode = rx_mode;
7029 tw32_f(MAC_RX_MODE, rx_mode);
7030 udelay(10);
7031 }
7032}
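
/* Worked example of the multicast hash in __tg3_set_rx_mode() above.  The
 * CRC value is made up purely for illustration and does not correspond to
 * any particular address:
 *
 *   crc    = calc_crc(addr, ETH_ALEN);   e.g. crc  = 0x6b2f3d41
 *   bit    = ~crc & 0x7f;                ~crc = 0x94d0c2be -> bit = 0x3e (62)
 *   regidx = (bit & 0x60) >> 5;          0x3e & 0x60 = 0x20 -> regidx = 1
 *   bit   &= 0x1f;                       0x3e & 0x1f = 0x1e (30)
 *
 * so this address sets bit 30 of MAC_HASH_REG_1, i.e. bit 62 of the 128-bit
 * hash filter spread across MAC_HASH_REG_0..MAC_HASH_REG_3.
 *
 * The helper below is an optional debugging sketch along the same lines; the
 * function name is hypothetical and nothing in the driver calls it.
 */
#if 0
static void tg3_mc_hash_example(struct tg3 *tp, unsigned char *addr)
{
	u32 crc = calc_crc(addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;
	u32 regidx = (bit & 0x60) >> 5;

	bit &= 0x1f;
	printk(KERN_DEBUG PFX "%s: multicast addr maps to MAC_HASH_REG_%u bit %u\n",
	       tp->dev->name, regidx, bit);
}
#endif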
7033
7034static void tg3_set_rx_mode(struct net_device *dev)
7035{
7036 struct tg3 *tp = netdev_priv(dev);
7037
David S. Millerf47c11e2005-06-24 20:18:35 -07007038 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007039 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07007040 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007041}
7042
7043#define TG3_REGDUMP_LEN (32 * 1024)
7044
7045static int tg3_get_regs_len(struct net_device *dev)
7046{
7047 return TG3_REGDUMP_LEN;
7048}
7049
7050static void tg3_get_regs(struct net_device *dev,
7051 struct ethtool_regs *regs, void *_p)
7052{
7053 u32 *p = _p;
7054 struct tg3 *tp = netdev_priv(dev);
7055 u8 *orig_p = _p;
7056 int i;
7057
7058 regs->version = 0;
7059
7060 memset(p, 0, TG3_REGDUMP_LEN);
7061
David S. Millerf47c11e2005-06-24 20:18:35 -07007062 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007063
7064#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7065#define GET_REG32_LOOP(base,len) \
7066do { p = (u32 *)(orig_p + (base)); \
7067 for (i = 0; i < len; i += 4) \
7068 __GET_REG32((base) + i); \
7069} while (0)
7070#define GET_REG32_1(reg) \
7071do { p = (u32 *)(orig_p + (reg)); \
7072 __GET_REG32((reg)); \
7073} while (0)
7074
7075 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7076 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7077 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7078 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7079 GET_REG32_1(SNDDATAC_MODE);
7080 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7081 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7082 GET_REG32_1(SNDBDC_MODE);
7083 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7084 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7085 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7086 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7087 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7088 GET_REG32_1(RCVDCC_MODE);
7089 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7090 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7091 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7092 GET_REG32_1(MBFREE_MODE);
7093 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7094 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7095 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7096 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7097 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7098 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7099 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7100 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7101 GET_REG32_LOOP(FTQ_RESET, 0x120);
7102 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7103 GET_REG32_1(DMAC_MODE);
7104 GET_REG32_LOOP(GRC_MODE, 0x4c);
7105 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7106 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7107
7108#undef __GET_REG32
7109#undef GET_REG32_LOOP
7110#undef GET_REG32_1
7111
David S. Millerf47c11e2005-06-24 20:18:35 -07007112 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007113}
7114
7115static int tg3_get_eeprom_len(struct net_device *dev)
7116{
7117 struct tg3 *tp = netdev_priv(dev);
7118
7119 return tp->nvram_size;
7120}
7121
7122static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7123
7124static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7125{
7126 struct tg3 *tp = netdev_priv(dev);
7127 int ret;
7128 u8 *pd;
7129 u32 i, offset, len, val, b_offset, b_count;
7130
7131 offset = eeprom->offset;
7132 len = eeprom->len;
7133 eeprom->len = 0;
7134
7135 eeprom->magic = TG3_EEPROM_MAGIC;
7136
7137 if (offset & 3) {
7138 /* adjustments to start on required 4 byte boundary */
7139 b_offset = offset & 3;
7140 b_count = 4 - b_offset;
7141 if (b_count > len) {
7142 /* i.e. offset=1 len=2 */
7143 b_count = len;
7144 }
7145 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7146 if (ret)
7147 return ret;
7148 val = cpu_to_le32(val);
7149 memcpy(data, ((char*)&val) + b_offset, b_count);
7150 len -= b_count;
7151 offset += b_count;
7152 eeprom->len += b_count;
7153 }
7154
7155 /* read bytes up to the last 4-byte boundary */
7156 pd = &data[eeprom->len];
7157 for (i = 0; i < (len - (len & 3)); i += 4) {
7158 ret = tg3_nvram_read(tp, offset + i, &val);
7159 if (ret) {
7160 eeprom->len += i;
7161 return ret;
7162 }
7163 val = cpu_to_le32(val);
7164 memcpy(pd + i, &val, 4);
7165 }
7166 eeprom->len += i;
7167
7168 if (len & 3) {
7169 /* read last bytes not ending on 4 byte boundary */
7170 pd = &data[eeprom->len];
7171 b_count = len & 3;
7172 b_offset = offset + len - b_count;
7173 ret = tg3_nvram_read(tp, b_offset, &val);
7174 if (ret)
7175 return ret;
7176 val = cpu_to_le32(val);
7177 memcpy(pd, ((char*)&val), b_count);
7178 eeprom->len += b_count;
7179 }
7180 return 0;
7181}
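
/* Worked example for the alignment handling in tg3_get_eeprom() above
 * (offsets chosen purely for illustration): a request with offset = 5 and
 * len = 10 is serviced as three NVRAM word reads:
 *
 *   1. head:  b_offset = 1, b_count = 3 -> read the word at offset 4 and
 *             copy bytes 5..7 into data[0..2]
 *   2. body:  offset = 8, len = 7, so the aligned loop reads one full word
 *             at offset 8 into data[3..6]
 *   3. tail:  len & 3 = 3 -> read the word at offset 12 and copy its first
 *             3 bytes into data[7..9]
 *
 * eeprom->len grows by 3 + 4 + 3 = 10 bytes, matching the original request.
 */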
7182
7183static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7184
7185static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7186{
7187 struct tg3 *tp = netdev_priv(dev);
7188 int ret;
7189 u32 offset, len, b_offset, odd_len, start, end;
7190 u8 *buf;
7191
7192 if (eeprom->magic != TG3_EEPROM_MAGIC)
7193 return -EINVAL;
7194
7195 offset = eeprom->offset;
7196 len = eeprom->len;
7197
7198 if ((b_offset = (offset & 3))) {
7199 /* adjustments to start on required 4 byte boundary */
7200 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7201 if (ret)
7202 return ret;
7203 start = cpu_to_le32(start);
7204 len += b_offset;
7205 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07007206 if (len < 4)
7207 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007208 }
7209
7210 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07007211 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212 /* adjustments to end on required 4 byte boundary */
7213 odd_len = 1;
7214 len = (len + 3) & ~3;
7215 ret = tg3_nvram_read(tp, offset+len-4, &end);
7216 if (ret)
7217 return ret;
7218 end = cpu_to_le32(end);
7219 }
7220
7221 buf = data;
7222 if (b_offset || odd_len) {
7223 buf = kmalloc(len, GFP_KERNEL);
7224 if (!buf)
7225 return -ENOMEM;
7226 if (b_offset)
7227 memcpy(buf, &start, 4);
7228 if (odd_len)
7229 memcpy(buf+len-4, &end, 4);
7230 memcpy(buf + b_offset, data, eeprom->len);
7231 }
7232
7233 ret = tg3_nvram_write_block(tp, offset, len, buf);
7234
7235 if (buf != data)
7236 kfree(buf);
7237
7238 return ret;
7239}
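
/* Worked example for the read-modify-write in tg3_set_eeprom() above (values
 * chosen purely for illustration): writing eeprom->len = 5 bytes at
 * eeprom->offset = 6 becomes one aligned 8-byte write at offset 4:
 *
 *   head:   b_offset = 2, so the word at offset 4 is read back into 'start'
 *   tail:   len grows 5 -> 7 -> 8 and odd_len = 1, so the word at offset 8
 *           is read back into 'end'
 *   merge:  buf[0..3] = start, buf[4..7] = end, then the caller's 5 bytes
 *           overwrite buf[2..6]
 *
 * The neighbouring bytes at offsets 4, 5 and 11 are therefore preserved and
 * written back unchanged by tg3_nvram_write_block().
 */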
7240
7241static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7242{
7243 struct tg3 *tp = netdev_priv(dev);
7244
7245 cmd->supported = (SUPPORTED_Autoneg);
7246
7247 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7248 cmd->supported |= (SUPPORTED_1000baseT_Half |
7249 SUPPORTED_1000baseT_Full);
7250
7251 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
7252 cmd->supported |= (SUPPORTED_100baseT_Half |
7253 SUPPORTED_100baseT_Full |
7254 SUPPORTED_10baseT_Half |
7255 SUPPORTED_10baseT_Full |
7256 SUPPORTED_MII);
7257 else
7258 cmd->supported |= SUPPORTED_FIBRE;
7259
7260 cmd->advertising = tp->link_config.advertising;
7261 if (netif_running(dev)) {
7262 cmd->speed = tp->link_config.active_speed;
7263 cmd->duplex = tp->link_config.active_duplex;
7264 }
7265 cmd->port = 0;
7266 cmd->phy_address = PHY_ADDR;
7267 cmd->transceiver = 0;
7268 cmd->autoneg = tp->link_config.autoneg;
7269 cmd->maxtxpkt = 0;
7270 cmd->maxrxpkt = 0;
7271 return 0;
7272}
7273
7274static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7275{
7276 struct tg3 *tp = netdev_priv(dev);
7277
7278 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7279 /* These are the only valid advertisement bits allowed. */
7280 if (cmd->autoneg == AUTONEG_ENABLE &&
7281 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7282 ADVERTISED_1000baseT_Full |
7283 ADVERTISED_Autoneg |
7284 ADVERTISED_FIBRE)))
7285 return -EINVAL;
7286 }
7287
David S. Millerf47c11e2005-06-24 20:18:35 -07007288 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007289
7290 tp->link_config.autoneg = cmd->autoneg;
7291 if (cmd->autoneg == AUTONEG_ENABLE) {
7292 tp->link_config.advertising = cmd->advertising;
7293 tp->link_config.speed = SPEED_INVALID;
7294 tp->link_config.duplex = DUPLEX_INVALID;
7295 } else {
7296 tp->link_config.advertising = 0;
7297 tp->link_config.speed = cmd->speed;
7298 tp->link_config.duplex = cmd->duplex;
7299 }
7300
7301 if (netif_running(dev))
7302 tg3_setup_phy(tp, 1);
7303
David S. Millerf47c11e2005-06-24 20:18:35 -07007304 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007305
7306 return 0;
7307}
7308
7309static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7310{
7311 struct tg3 *tp = netdev_priv(dev);
7312
7313 strcpy(info->driver, DRV_MODULE_NAME);
7314 strcpy(info->version, DRV_MODULE_VERSION);
7315 strcpy(info->bus_info, pci_name(tp->pdev));
7316}
7317
7318static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7319{
7320 struct tg3 *tp = netdev_priv(dev);
7321
7322 wol->supported = WAKE_MAGIC;
7323 wol->wolopts = 0;
7324 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7325 wol->wolopts = WAKE_MAGIC;
7326 memset(&wol->sopass, 0, sizeof(wol->sopass));
7327}
7328
7329static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7330{
7331 struct tg3 *tp = netdev_priv(dev);
7332
7333 if (wol->wolopts & ~WAKE_MAGIC)
7334 return -EINVAL;
7335 if ((wol->wolopts & WAKE_MAGIC) &&
7336 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7337 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7338 return -EINVAL;
7339
David S. Millerf47c11e2005-06-24 20:18:35 -07007340 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007341 if (wol->wolopts & WAKE_MAGIC)
7342 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7343 else
7344 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
David S. Millerf47c11e2005-06-24 20:18:35 -07007345 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007346
7347 return 0;
7348}
7349
7350static u32 tg3_get_msglevel(struct net_device *dev)
7351{
7352 struct tg3 *tp = netdev_priv(dev);
7353 return tp->msg_enable;
7354}
7355
7356static void tg3_set_msglevel(struct net_device *dev, u32 value)
7357{
7358 struct tg3 *tp = netdev_priv(dev);
7359 tp->msg_enable = value;
7360}
7361
7362#if TG3_TSO_SUPPORT != 0
7363static int tg3_set_tso(struct net_device *dev, u32 value)
7364{
7365 struct tg3 *tp = netdev_priv(dev);
7366
7367 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7368 if (value)
7369 return -EINVAL;
7370 return 0;
7371 }
7372 return ethtool_op_set_tso(dev, value);
7373}
7374#endif
7375
7376static int tg3_nway_reset(struct net_device *dev)
7377{
7378 struct tg3 *tp = netdev_priv(dev);
7379 u32 bmcr;
7380 int r;
7381
7382 if (!netif_running(dev))
7383 return -EAGAIN;
7384
David S. Millerf47c11e2005-06-24 20:18:35 -07007385 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007386 r = -EINVAL;
7387 tg3_readphy(tp, MII_BMCR, &bmcr);
7388 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7389 (bmcr & BMCR_ANENABLE)) {
7390 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7391 r = 0;
7392 }
David S. Millerf47c11e2005-06-24 20:18:35 -07007393 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007394
7395 return r;
7396}
7397
7398static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7399{
7400 struct tg3 *tp = netdev_priv(dev);
7401
7402 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7403 ering->rx_mini_max_pending = 0;
7404 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7405
7406 ering->rx_pending = tp->rx_pending;
7407 ering->rx_mini_pending = 0;
7408 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7409 ering->tx_pending = tp->tx_pending;
7410}
7411
7412static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7413{
7414 struct tg3 *tp = netdev_priv(dev);
Michael Chanbbe832c2005-06-24 20:20:04 -07007415 int irq_sync = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007416
7417 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7418 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7419 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7420 return -EINVAL;
7421
Michael Chanbbe832c2005-06-24 20:20:04 -07007422 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007423 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07007424 irq_sync = 1;
7425 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007426
Michael Chanbbe832c2005-06-24 20:20:04 -07007427 tg3_full_lock(tp, irq_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007428
7429 tp->rx_pending = ering->rx_pending;
7430
7431 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7432 tp->rx_pending > 63)
7433 tp->rx_pending = 63;
7434 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7435 tp->tx_pending = ering->tx_pending;
7436
7437 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07007438 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007439 tg3_init_hw(tp);
7440 tg3_netif_start(tp);
7441 }
7442
David S. Millerf47c11e2005-06-24 20:18:35 -07007443 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007444
7445 return 0;
7446}
7447
7448static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7449{
7450 struct tg3 *tp = netdev_priv(dev);
7451
7452 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7453 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7454 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7455}
7456
7457static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7458{
7459 struct tg3 *tp = netdev_priv(dev);
Michael Chanbbe832c2005-06-24 20:20:04 -07007460 int irq_sync = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007461
Michael Chanbbe832c2005-06-24 20:20:04 -07007462 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007463 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07007464 irq_sync = 1;
7465 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007466
Michael Chanbbe832c2005-06-24 20:20:04 -07007467 tg3_full_lock(tp, irq_sync);
David S. Millerf47c11e2005-06-24 20:18:35 -07007468
Linus Torvalds1da177e2005-04-16 15:20:36 -07007469 if (epause->autoneg)
7470 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7471 else
7472 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7473 if (epause->rx_pause)
7474 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7475 else
7476 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7477 if (epause->tx_pause)
7478 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7479 else
7480 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7481
7482 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07007483 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007484 tg3_init_hw(tp);
7485 tg3_netif_start(tp);
7486 }
David S. Millerf47c11e2005-06-24 20:18:35 -07007487
7488 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007489
7490 return 0;
7491}
7492
7493static u32 tg3_get_rx_csum(struct net_device *dev)
7494{
7495 struct tg3 *tp = netdev_priv(dev);
7496 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7497}
7498
7499static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7500{
7501 struct tg3 *tp = netdev_priv(dev);
7502
7503 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7504 if (data != 0)
7505 return -EINVAL;
7506 return 0;
7507 }
7508
David S. Millerf47c11e2005-06-24 20:18:35 -07007509 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007510 if (data)
7511 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7512 else
7513 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07007514 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007515
7516 return 0;
7517}
7518
7519static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7520{
7521 struct tg3 *tp = netdev_priv(dev);
7522
7523 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7524 if (data != 0)
7525 return -EINVAL;
7526 return 0;
7527 }
7528
7529 if (data)
7530 dev->features |= NETIF_F_IP_CSUM;
7531 else
7532 dev->features &= ~NETIF_F_IP_CSUM;
7533
7534 return 0;
7535}
7536
7537static int tg3_get_stats_count (struct net_device *dev)
7538{
7539 return TG3_NUM_STATS;
7540}
7541
Michael Chan4cafd3f2005-05-29 14:56:34 -07007542static int tg3_get_test_count (struct net_device *dev)
7543{
7544 return TG3_NUM_TEST;
7545}
7546
Linus Torvalds1da177e2005-04-16 15:20:36 -07007547static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7548{
7549 switch (stringset) {
7550 case ETH_SS_STATS:
7551 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7552 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07007553 case ETH_SS_TEST:
7554 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7555 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007556 default:
7557 WARN_ON(1); /* we need a WARN() */
7558 break;
7559 }
7560}
7561
Michael Chan4009a932005-09-05 17:52:54 -07007562static int tg3_phys_id(struct net_device *dev, u32 data)
7563{
7564 struct tg3 *tp = netdev_priv(dev);
7565 int i;
7566
7567 if (!netif_running(tp->dev))
7568 return -EAGAIN;
7569
7570 if (data == 0)
7571 data = 2;
7572
7573 for (i = 0; i < (data * 2); i++) {
7574 if ((i % 2) == 0)
7575 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7576 LED_CTRL_1000MBPS_ON |
7577 LED_CTRL_100MBPS_ON |
7578 LED_CTRL_10MBPS_ON |
7579 LED_CTRL_TRAFFIC_OVERRIDE |
7580 LED_CTRL_TRAFFIC_BLINK |
7581 LED_CTRL_TRAFFIC_LED);
7582
7583 else
7584 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7585 LED_CTRL_TRAFFIC_OVERRIDE);
7586
7587 if (msleep_interruptible(500))
7588 break;
7589 }
7590 tw32(MAC_LED_CTRL, tp->led_ctrl);
7591 return 0;
7592}
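
/* Timing sketch for the LED test above: with the default data = 2 the loop
 * runs 4 half-periods of 500 ms each, i.e. the link LEDs blink on and off
 * twice over roughly 2 seconds (unless interrupted) before tp->led_ctrl is
 * restored.  A larger 'data' passed in from ethtool simply extends the
 * blinking to about that many seconds.
 */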
7593
Linus Torvalds1da177e2005-04-16 15:20:36 -07007594static void tg3_get_ethtool_stats (struct net_device *dev,
7595 struct ethtool_stats *estats, u64 *tmp_stats)
7596{
7597 struct tg3 *tp = netdev_priv(dev);
7598 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7599}
7600
Michael Chan566f86a2005-05-29 14:56:58 -07007601#define NVRAM_TEST_SIZE 0x100
7602
7603static int tg3_test_nvram(struct tg3 *tp)
7604{
7605 u32 *buf, csum;
7606 int i, j, err = 0;
7607
7608 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7609 if (buf == NULL)
7610 return -ENOMEM;
7611
7612 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7613 u32 val;
7614
7615 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7616 break;
7617 buf[j] = cpu_to_le32(val);
7618 }
7619 if (i < NVRAM_TEST_SIZE)
7620 goto out;
7621
7622 err = -EIO;
7623 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7624 goto out;
7625
7626 /* Bootstrap checksum at offset 0x10 */
7627 csum = calc_crc((unsigned char *) buf, 0x10);
7628 if (csum != cpu_to_le32(buf[0x10/4]))
7629 goto out;
7630
7631 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7632 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7633 if (csum != cpu_to_le32(buf[0xfc/4]))
7634 goto out;
7635
7636 err = 0;
7637
7638out:
7639 kfree(buf);
7640 return err;
7641}
7642
Michael Chanca430072005-05-29 14:57:23 -07007643#define TG3_SERDES_TIMEOUT_SEC 2
7644#define TG3_COPPER_TIMEOUT_SEC 6
7645
7646static int tg3_test_link(struct tg3 *tp)
7647{
7648 int i, max;
7649
7650 if (!netif_running(tp->dev))
7651 return -ENODEV;
7652
Michael Chan4c987482005-09-05 17:52:38 -07007653 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07007654 max = TG3_SERDES_TIMEOUT_SEC;
7655 else
7656 max = TG3_COPPER_TIMEOUT_SEC;
7657
7658 for (i = 0; i < max; i++) {
7659 if (netif_carrier_ok(tp->dev))
7660 return 0;
7661
7662 if (msleep_interruptible(1000))
7663 break;
7664 }
7665
7666 return -EIO;
7667}
7668
Michael Chana71116d2005-05-29 14:58:11 -07007669/* Only test the commonly used registers */
7670static int tg3_test_registers(struct tg3 *tp)
7671{
7672 int i, is_5705;
7673 u32 offset, read_mask, write_mask, val, save_val, read_val;
7674 static struct {
7675 u16 offset;
7676 u16 flags;
7677#define TG3_FL_5705 0x1
7678#define TG3_FL_NOT_5705 0x2
7679#define TG3_FL_NOT_5788 0x4
7680 u32 read_mask;
7681 u32 write_mask;
7682 } reg_tbl[] = {
7683 /* MAC Control Registers */
7684 { MAC_MODE, TG3_FL_NOT_5705,
7685 0x00000000, 0x00ef6f8c },
7686 { MAC_MODE, TG3_FL_5705,
7687 0x00000000, 0x01ef6b8c },
7688 { MAC_STATUS, TG3_FL_NOT_5705,
7689 0x03800107, 0x00000000 },
7690 { MAC_STATUS, TG3_FL_5705,
7691 0x03800100, 0x00000000 },
7692 { MAC_ADDR_0_HIGH, 0x0000,
7693 0x00000000, 0x0000ffff },
7694 { MAC_ADDR_0_LOW, 0x0000,
7695 0x00000000, 0xffffffff },
7696 { MAC_RX_MTU_SIZE, 0x0000,
7697 0x00000000, 0x0000ffff },
7698 { MAC_TX_MODE, 0x0000,
7699 0x00000000, 0x00000070 },
7700 { MAC_TX_LENGTHS, 0x0000,
7701 0x00000000, 0x00003fff },
7702 { MAC_RX_MODE, TG3_FL_NOT_5705,
7703 0x00000000, 0x000007fc },
7704 { MAC_RX_MODE, TG3_FL_5705,
7705 0x00000000, 0x000007dc },
7706 { MAC_HASH_REG_0, 0x0000,
7707 0x00000000, 0xffffffff },
7708 { MAC_HASH_REG_1, 0x0000,
7709 0x00000000, 0xffffffff },
7710 { MAC_HASH_REG_2, 0x0000,
7711 0x00000000, 0xffffffff },
7712 { MAC_HASH_REG_3, 0x0000,
7713 0x00000000, 0xffffffff },
7714
7715 /* Receive Data and Receive BD Initiator Control Registers. */
7716 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7717 0x00000000, 0xffffffff },
7718 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7719 0x00000000, 0xffffffff },
7720 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7721 0x00000000, 0x00000003 },
7722 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7723 0x00000000, 0xffffffff },
7724 { RCVDBDI_STD_BD+0, 0x0000,
7725 0x00000000, 0xffffffff },
7726 { RCVDBDI_STD_BD+4, 0x0000,
7727 0x00000000, 0xffffffff },
7728 { RCVDBDI_STD_BD+8, 0x0000,
7729 0x00000000, 0xffff0002 },
7730 { RCVDBDI_STD_BD+0xc, 0x0000,
7731 0x00000000, 0xffffffff },
7732
7733 /* Receive BD Initiator Control Registers. */
7734 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7735 0x00000000, 0xffffffff },
7736 { RCVBDI_STD_THRESH, TG3_FL_5705,
7737 0x00000000, 0x000003ff },
7738 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7739 0x00000000, 0xffffffff },
7740
7741 /* Host Coalescing Control Registers. */
7742 { HOSTCC_MODE, TG3_FL_NOT_5705,
7743 0x00000000, 0x00000004 },
7744 { HOSTCC_MODE, TG3_FL_5705,
7745 0x00000000, 0x000000f6 },
7746 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7747 0x00000000, 0xffffffff },
7748 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7749 0x00000000, 0x000003ff },
7750 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7751 0x00000000, 0xffffffff },
7752 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7753 0x00000000, 0x000003ff },
7754 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7755 0x00000000, 0xffffffff },
7756 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7757 0x00000000, 0x000000ff },
7758 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7759 0x00000000, 0xffffffff },
7760 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7761 0x00000000, 0x000000ff },
7762 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7763 0x00000000, 0xffffffff },
7764 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7765 0x00000000, 0xffffffff },
7766 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7767 0x00000000, 0xffffffff },
7768 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7769 0x00000000, 0x000000ff },
7770 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7771 0x00000000, 0xffffffff },
7772 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7773 0x00000000, 0x000000ff },
7774 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7775 0x00000000, 0xffffffff },
7776 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7777 0x00000000, 0xffffffff },
7778 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7779 0x00000000, 0xffffffff },
7780 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7781 0x00000000, 0xffffffff },
7782 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7783 0x00000000, 0xffffffff },
7784 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7785 0xffffffff, 0x00000000 },
7786 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7787 0xffffffff, 0x00000000 },
7788
7789 /* Buffer Manager Control Registers. */
7790 { BUFMGR_MB_POOL_ADDR, 0x0000,
7791 0x00000000, 0x007fff80 },
7792 { BUFMGR_MB_POOL_SIZE, 0x0000,
7793 0x00000000, 0x007fffff },
7794 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7795 0x00000000, 0x0000003f },
7796 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7797 0x00000000, 0x000001ff },
7798 { BUFMGR_MB_HIGH_WATER, 0x0000,
7799 0x00000000, 0x000001ff },
7800 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7801 0xffffffff, 0x00000000 },
7802 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7803 0xffffffff, 0x00000000 },
7804
7805 /* Mailbox Registers */
7806 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7807 0x00000000, 0x000001ff },
7808 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7809 0x00000000, 0x000001ff },
7810 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7811 0x00000000, 0x000007ff },
7812 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7813 0x00000000, 0x000001ff },
7814
7815 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7816 };
7817
7818 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7819 is_5705 = 1;
7820 else
7821 is_5705 = 0;
7822
7823 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7824 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7825 continue;
7826
7827 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7828 continue;
7829
7830 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7831 (reg_tbl[i].flags & TG3_FL_NOT_5788))
7832 continue;
7833
7834 offset = (u32) reg_tbl[i].offset;
7835 read_mask = reg_tbl[i].read_mask;
7836 write_mask = reg_tbl[i].write_mask;
7837
7838 /* Save the original register content */
7839 save_val = tr32(offset);
7840
7841 /* Determine the read-only value. */
7842 read_val = save_val & read_mask;
7843
7844 /* Write zero to the register, then make sure the read-only bits
7845 * are not changed and the read/write bits are all zeros.
7846 */
7847 tw32(offset, 0);
7848
7849 val = tr32(offset);
7850
7851 /* Test the read-only and read/write bits. */
7852 if (((val & read_mask) != read_val) || (val & write_mask))
7853 goto out;
7854
7855 /* Write ones to all the bits defined by RdMask and WrMask, then
7856 * make sure the read-only bits are not changed and the
7857 * read/write bits are all ones.
7858 */
7859 tw32(offset, read_mask | write_mask);
7860
7861 val = tr32(offset);
7862
7863 /* Test the read-only bits. */
7864 if ((val & read_mask) != read_val)
7865 goto out;
7866
7867 /* Test the read/write bits. */
7868 if ((val & write_mask) != write_mask)
7869 goto out;
7870
7871 tw32(offset, save_val);
7872 }
7873
7874 return 0;
7875
7876out:
7877 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7878 tw32(offset, save_val);
7879 return -EIO;
7880}
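
/* Example of how the read_mask/write_mask pairs above are interpreted,
 * using one entry from reg_tbl[] for illustration:
 *
 *   { MAC_RX_MTU_SIZE, 0x0000, 0x00000000, 0x0000ffff }
 *
 * read_mask = 0 means no bits are expected to be read-only, and
 * write_mask = 0x0000ffff means only the low 16 bits must accept writes.
 * The test first writes 0 and expects (val & 0x0000ffff) == 0, then writes
 * 0x0000ffff and expects those same bits to read back as ones; bits outside
 * both masks are written but never checked.
 */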
7881
Michael Chan7942e1d2005-05-29 14:58:36 -07007882static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7883{
7884 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7885 int i;
7886 u32 j;
7887
7888 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7889 for (j = 0; j < len; j += 4) {
7890 u32 val;
7891
7892 tg3_write_mem(tp, offset + j, test_pattern[i]);
7893 tg3_read_mem(tp, offset + j, &val);
7894 if (val != test_pattern[i])
7895 return -EIO;
7896 }
7897 }
7898 return 0;
7899}
7900
7901static int tg3_test_memory(struct tg3 *tp)
7902{
7903 static struct mem_entry {
7904 u32 offset;
7905 u32 len;
7906 } mem_tbl_570x[] = {
7907 { 0x00000000, 0x01000},
7908 { 0x00002000, 0x1c000},
7909 { 0xffffffff, 0x00000}
7910 }, mem_tbl_5705[] = {
7911 { 0x00000100, 0x0000c},
7912 { 0x00000200, 0x00008},
7913 { 0x00000b50, 0x00400},
7914 { 0x00004000, 0x00800},
7915 { 0x00006000, 0x01000},
7916 { 0x00008000, 0x02000},
7917 { 0x00010000, 0x0e000},
7918 { 0xffffffff, 0x00000}
7919 };
7920 struct mem_entry *mem_tbl;
7921 int err = 0;
7922 int i;
7923
7924 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7925 mem_tbl = mem_tbl_5705;
7926 else
7927 mem_tbl = mem_tbl_570x;
7928
7929 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7930 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7931 mem_tbl[i].len)) != 0)
7932 break;
7933 }
7934
7935 return err;
7936}
7937
Michael Chan9f40dea2005-09-05 17:53:06 -07007938#define TG3_MAC_LOOPBACK 0
7939#define TG3_PHY_LOOPBACK 1
7940
7941static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07007942{
Michael Chan9f40dea2005-09-05 17:53:06 -07007943 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07007944 u32 desc_idx;
7945 struct sk_buff *skb, *rx_skb;
7946 u8 *tx_data;
7947 dma_addr_t map;
7948 int num_pkts, tx_len, rx_len, i, err;
7949 struct tg3_rx_buffer_desc *desc;
7950
Michael Chan9f40dea2005-09-05 17:53:06 -07007951 if (loopback_mode == TG3_MAC_LOOPBACK) {
7952 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7953 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7954 MAC_MODE_PORT_MODE_GMII;
7955 tw32(MAC_MODE, mac_mode);
7956 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
7957 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7958 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7959 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7960 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7961 tw32(MAC_MODE, mac_mode);
7962
7963 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7964 BMCR_SPEED1000);
7965 }
7966 else
7967 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -07007968
7969 err = -EIO;
7970
Michael Chanc76949a2005-05-29 14:58:59 -07007971 tx_len = 1514;
7972 skb = dev_alloc_skb(tx_len);
 if (!skb)
 	return -ENOMEM;
7973 tx_data = skb_put(skb, tx_len);
7974 memcpy(tx_data, tp->dev->dev_addr, 6);
7975 memset(tx_data + 6, 0x0, 8);
7976
7977 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7978
7979 for (i = 14; i < tx_len; i++)
7980 tx_data[i] = (u8) (i & 0xff);
7981
7982 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7983
7984 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7985 HOSTCC_MODE_NOW);
7986
7987 udelay(10);
7988
7989 rx_start_idx = tp->hw_status->idx[0].rx_producer;
7990
Michael Chanc76949a2005-05-29 14:58:59 -07007991 num_pkts = 0;
7992
Michael Chan9f40dea2005-09-05 17:53:06 -07007993 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -07007994
Michael Chan9f40dea2005-09-05 17:53:06 -07007995 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -07007996 num_pkts++;
7997
Michael Chan9f40dea2005-09-05 17:53:06 -07007998 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
7999 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -07008000 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -07008001
8002 udelay(10);
8003
8004 for (i = 0; i < 10; i++) {
8005 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8006 HOSTCC_MODE_NOW);
8007
8008 udelay(10);
8009
8010 tx_idx = tp->hw_status->idx[0].tx_consumer;
8011 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -07008012 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -07008013 (rx_idx == (rx_start_idx + num_pkts)))
8014 break;
8015 }
8016
8017 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8018 dev_kfree_skb(skb);
8019
Michael Chan9f40dea2005-09-05 17:53:06 -07008020 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -07008021 goto out;
8022
8023 if (rx_idx != rx_start_idx + num_pkts)
8024 goto out;
8025
8026 desc = &tp->rx_rcb[rx_start_idx];
8027 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8028 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8029 if (opaque_key != RXD_OPAQUE_RING_STD)
8030 goto out;
8031
8032 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8033 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8034 goto out;
8035
8036 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8037 if (rx_len != tx_len)
8038 goto out;
8039
8040 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8041
8042 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8043 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8044
8045 for (i = 14; i < tx_len; i++) {
8046 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8047 goto out;
8048 }
8049 err = 0;
8050
8051 /* tg3_free_rings will unmap and free the rx_skb */
8052out:
8053 return err;
8054}
8055
Michael Chan9f40dea2005-09-05 17:53:06 -07008056#define TG3_MAC_LOOPBACK_FAILED 1
8057#define TG3_PHY_LOOPBACK_FAILED 2
8058#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8059 TG3_PHY_LOOPBACK_FAILED)
8060
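/* Run the loopback test first with the frame looped back inside the MAC,
 * then looped back in the PHY.  The PHY loopback case is skipped on
 * SERDES boards, which have no copper PHY to loop through.
 */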
8061static int tg3_test_loopback(struct tg3 *tp)
8062{
8063 int err = 0;
8064
8065 if (!netif_running(tp->dev))
8066 return TG3_LOOPBACK_FAILED;
8067
8068 tg3_reset_hw(tp);
8069
8070 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8071 err |= TG3_MAC_LOOPBACK_FAILED;
8072 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8073 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8074 err |= TG3_PHY_LOOPBACK_FAILED;
8075 }
8076
8077 return err;
8078}
8079
Michael Chan4cafd3f2005-05-29 14:56:34 -07008080static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8081 u64 *data)
8082{
Michael Chan566f86a2005-05-29 14:56:58 -07008083 struct tg3 *tp = netdev_priv(dev);
8084
8085 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8086
8087 if (tg3_test_nvram(tp) != 0) {
8088 etest->flags |= ETH_TEST_FL_FAILED;
8089 data[0] = 1;
8090 }
Michael Chanca430072005-05-29 14:57:23 -07008091 if (tg3_test_link(tp) != 0) {
8092 etest->flags |= ETH_TEST_FL_FAILED;
8093 data[1] = 1;
8094 }
Michael Chana71116d2005-05-29 14:58:11 -07008095 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chanbbe832c2005-06-24 20:20:04 -07008096 int irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -07008097
Michael Chanbbe832c2005-06-24 20:20:04 -07008098 if (netif_running(dev)) {
8099 tg3_netif_stop(tp);
8100 irq_sync = 1;
8101 }
8102
8103 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -07008104
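		/* The offline register/memory tests poke at chip internals,
		 * so quiesce the hardware first: reset the chip, then halt
		 * the on-chip RX CPU (and the TX CPU on chips that have a
		 * separate one) so the on-chip firmware cannot interfere.
		 */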
8105 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8106 tg3_nvram_lock(tp);
8107 tg3_halt_cpu(tp, RX_CPU_BASE);
8108 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8109 tg3_halt_cpu(tp, TX_CPU_BASE);
8110 tg3_nvram_unlock(tp);
8111
8112 if (tg3_test_registers(tp) != 0) {
8113 etest->flags |= ETH_TEST_FL_FAILED;
8114 data[2] = 1;
8115 }
Michael Chan7942e1d2005-05-29 14:58:36 -07008116 if (tg3_test_memory(tp) != 0) {
8117 etest->flags |= ETH_TEST_FL_FAILED;
8118 data[3] = 1;
8119 }
Michael Chan9f40dea2005-09-05 17:53:06 -07008120 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -07008121 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -07008122
David S. Millerf47c11e2005-06-24 20:18:35 -07008123 tg3_full_unlock(tp);
8124
Michael Chand4bc3922005-05-29 14:59:20 -07008125 if (tg3_test_interrupt(tp) != 0) {
8126 etest->flags |= ETH_TEST_FL_FAILED;
8127 data[5] = 1;
8128 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008129
8130 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -07008131
Michael Chana71116d2005-05-29 14:58:11 -07008132 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8133 if (netif_running(dev)) {
8134 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8135 tg3_init_hw(tp);
8136 tg3_netif_start(tp);
8137 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008138
8139 tg3_full_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008140 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07008141}
8142
Linus Torvalds1da177e2005-04-16 15:20:36 -07008143static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8144{
8145 struct mii_ioctl_data *data = if_mii(ifr);
8146 struct tg3 *tp = netdev_priv(dev);
8147 int err;
8148
8149 switch(cmd) {
8150 case SIOCGMIIPHY:
8151 data->phy_id = PHY_ADDR;
8152
8153 /* fallthru */
8154 case SIOCGMIIREG: {
8155 u32 mii_regval;
8156
8157 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8158 break; /* We have no PHY */
8159
David S. Millerf47c11e2005-06-24 20:18:35 -07008160 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008161 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -07008162 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008163
8164 data->val_out = mii_regval;
8165
8166 return err;
8167 }
8168
8169 case SIOCSMIIREG:
8170 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8171 break; /* We have no PHY */
8172
8173 if (!capable(CAP_NET_ADMIN))
8174 return -EPERM;
8175
David S. Millerf47c11e2005-06-24 20:18:35 -07008176 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008177 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -07008178 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008179
8180 return err;
8181
8182 default:
8183 /* do nothing */
8184 break;
8185 }
8186 return -EOPNOTSUPP;
8187}
8188
8189#if TG3_VLAN_TAG_USED
8190static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8191{
8192 struct tg3 *tp = netdev_priv(dev);
8193
David S. Millerf47c11e2005-06-24 20:18:35 -07008194 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008195
8196 tp->vlgrp = grp;
8197
8198 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8199 __tg3_set_rx_mode(dev);
8200
David S. Millerf47c11e2005-06-24 20:18:35 -07008201 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008202}
8203
8204static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8205{
8206 struct tg3 *tp = netdev_priv(dev);
8207
David S. Millerf47c11e2005-06-24 20:18:35 -07008208 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008209 if (tp->vlgrp)
8210 tp->vlgrp->vlan_devices[vid] = NULL;
David S. Millerf47c11e2005-06-24 20:18:35 -07008211 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008212}
8213#endif
8214
David S. Miller15f98502005-05-18 22:49:26 -07008215static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8216{
8217 struct tg3 *tp = netdev_priv(dev);
8218
8219 memcpy(ec, &tp->coal, sizeof(*ec));
8220 return 0;
8221}
8222
Michael Chand244c892005-07-05 14:42:33 -07008223static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8224{
8225 struct tg3 *tp = netdev_priv(dev);
8226 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8227 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8228
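	/* The IRQ coalescing tick and statistics tick parameters are not
	 * supported on 5705 and newer chips; leaving their limits at zero
	 * rejects any non-zero request below.
	 */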
8229 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8230 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8231 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8232 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8233 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8234 }
8235
8236 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8237 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8238 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8239 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8240 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8241 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8242 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8243 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8244 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8245 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8246 return -EINVAL;
8247
8248 /* No rx interrupts will be generated if both are zero */
8249 if ((ec->rx_coalesce_usecs == 0) &&
8250 (ec->rx_max_coalesced_frames == 0))
8251 return -EINVAL;
8252
8253 /* No tx interrupts will be generated if both are zero */
8254 if ((ec->tx_coalesce_usecs == 0) &&
8255 (ec->tx_max_coalesced_frames == 0))
8256 return -EINVAL;
8257
8258 /* Only copy relevant parameters, ignore all others. */
8259 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8260 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8261 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8262 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8263 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8264 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8265 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8266 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8267 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8268
8269 if (netif_running(dev)) {
8270 tg3_full_lock(tp, 0);
8271 __tg3_set_coalesce(tp, &tp->coal);
8272 tg3_full_unlock(tp);
8273 }
8274 return 0;
8275}
8276
Linus Torvalds1da177e2005-04-16 15:20:36 -07008277static struct ethtool_ops tg3_ethtool_ops = {
8278 .get_settings = tg3_get_settings,
8279 .set_settings = tg3_set_settings,
8280 .get_drvinfo = tg3_get_drvinfo,
8281 .get_regs_len = tg3_get_regs_len,
8282 .get_regs = tg3_get_regs,
8283 .get_wol = tg3_get_wol,
8284 .set_wol = tg3_set_wol,
8285 .get_msglevel = tg3_get_msglevel,
8286 .set_msglevel = tg3_set_msglevel,
8287 .nway_reset = tg3_nway_reset,
8288 .get_link = ethtool_op_get_link,
8289 .get_eeprom_len = tg3_get_eeprom_len,
8290 .get_eeprom = tg3_get_eeprom,
8291 .set_eeprom = tg3_set_eeprom,
8292 .get_ringparam = tg3_get_ringparam,
8293 .set_ringparam = tg3_set_ringparam,
8294 .get_pauseparam = tg3_get_pauseparam,
8295 .set_pauseparam = tg3_set_pauseparam,
8296 .get_rx_csum = tg3_get_rx_csum,
8297 .set_rx_csum = tg3_set_rx_csum,
8298 .get_tx_csum = ethtool_op_get_tx_csum,
8299 .set_tx_csum = tg3_set_tx_csum,
8300 .get_sg = ethtool_op_get_sg,
8301 .set_sg = ethtool_op_set_sg,
8302#if TG3_TSO_SUPPORT != 0
8303 .get_tso = ethtool_op_get_tso,
8304 .set_tso = tg3_set_tso,
8305#endif
Michael Chan4cafd3f2005-05-29 14:56:34 -07008306 .self_test_count = tg3_get_test_count,
8307 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008308 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -07008309 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008310 .get_stats_count = tg3_get_stats_count,
8311 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -07008312 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -07008313 .set_coalesce = tg3_set_coalesce,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008314};
8315
8316static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8317{
8318 u32 cursize, val;
8319
8320 tp->nvram_size = EEPROM_CHIP_SIZE;
8321
8322 if (tg3_nvram_read(tp, 0, &val) != 0)
8323 return;
8324
8325 if (swab32(val) != TG3_EEPROM_MAGIC)
8326 return;
8327
8328 /*
8329 * Size the chip by reading offsets at increasing powers of two.
8330 * When we encounter our validation signature, we know the addressing
8331 * has wrapped around, and thus have our chip size.
8332 */
8333 cursize = 0x800;
8334
8335 while (cursize < tp->nvram_size) {
8336 if (tg3_nvram_read(tp, cursize, &val) != 0)
8337 return;
8338
8339 if (swab32(val) == TG3_EEPROM_MAGIC)
8340 break;
8341
8342 cursize <<= 1;
8343 }
8344
8345 tp->nvram_size = cursize;
8346}
8347
8348static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8349{
8350 u32 val;
8351
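	/* The upper 16 bits of the word at NVRAM offset 0xf0 give the
	 * device size in KB; fall back to 128KB if it is not programmed.
	 */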
8352 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8353 if (val != 0) {
8354 tp->nvram_size = (val >> 16) * 1024;
8355 return;
8356 }
8357 }
8358 tp->nvram_size = 0x20000;
8359}
8360
8361static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8362{
8363 u32 nvcfg1;
8364
8365 nvcfg1 = tr32(NVRAM_CFG1);
8366 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8367 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8368 }
8369 else {
8370 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8371 tw32(NVRAM_CFG1, nvcfg1);
8372 }
8373
Michael Chan4c987482005-09-05 17:52:38 -07008374 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8375 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008376 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8377 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8378 tp->nvram_jedecnum = JEDEC_ATMEL;
8379 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8380 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8381 break;
8382 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8383 tp->nvram_jedecnum = JEDEC_ATMEL;
8384 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8385 break;
8386 case FLASH_VENDOR_ATMEL_EEPROM:
8387 tp->nvram_jedecnum = JEDEC_ATMEL;
8388 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8389 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8390 break;
8391 case FLASH_VENDOR_ST:
8392 tp->nvram_jedecnum = JEDEC_ST;
8393 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8394 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8395 break;
8396 case FLASH_VENDOR_SAIFUN:
8397 tp->nvram_jedecnum = JEDEC_SAIFUN;
8398 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8399 break;
8400 case FLASH_VENDOR_SST_SMALL:
8401 case FLASH_VENDOR_SST_LARGE:
8402 tp->nvram_jedecnum = JEDEC_SST;
8403 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8404 break;
8405 }
8406 }
8407 else {
8408 tp->nvram_jedecnum = JEDEC_ATMEL;
8409 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8410 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8411 }
8412}
8413
Michael Chan361b4ac2005-04-21 17:11:21 -07008414static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8415{
8416 u32 nvcfg1;
8417
8418 nvcfg1 = tr32(NVRAM_CFG1);
8419
Michael Chane6af3012005-04-21 17:12:05 -07008420 /* NVRAM protection for TPM */
8421 if (nvcfg1 & (1 << 27))
8422 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8423
Michael Chan361b4ac2005-04-21 17:11:21 -07008424 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8425 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8426 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8427 tp->nvram_jedecnum = JEDEC_ATMEL;
8428 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8429 break;
8430 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8431 tp->nvram_jedecnum = JEDEC_ATMEL;
8432 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8433 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8434 break;
8435 case FLASH_5752VENDOR_ST_M45PE10:
8436 case FLASH_5752VENDOR_ST_M45PE20:
8437 case FLASH_5752VENDOR_ST_M45PE40:
8438 tp->nvram_jedecnum = JEDEC_ST;
8439 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8440 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8441 break;
8442 }
8443
8444 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8445 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8446 case FLASH_5752PAGE_SIZE_256:
8447 tp->nvram_pagesize = 256;
8448 break;
8449 case FLASH_5752PAGE_SIZE_512:
8450 tp->nvram_pagesize = 512;
8451 break;
8452 case FLASH_5752PAGE_SIZE_1K:
8453 tp->nvram_pagesize = 1024;
8454 break;
8455 case FLASH_5752PAGE_SIZE_2K:
8456 tp->nvram_pagesize = 2048;
8457 break;
8458 case FLASH_5752PAGE_SIZE_4K:
8459 tp->nvram_pagesize = 4096;
8460 break;
8461 case FLASH_5752PAGE_SIZE_264:
8462 tp->nvram_pagesize = 264;
8463 break;
8464 }
8465 }
8466 else {
8467 /* For eeprom, set pagesize to maximum eeprom size */
8468 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8469
8470 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8471 tw32(NVRAM_CFG1, nvcfg1);
8472 }
8473}
8474
Linus Torvalds1da177e2005-04-16 15:20:36 -07008475/* Chips other than 5700/5701 use the NVRAM for fetching info. */
8476static void __devinit tg3_nvram_init(struct tg3 *tp)
8477{
8478 int j;
8479
8480 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8481 return;
8482
8483 tw32_f(GRC_EEPROM_ADDR,
8484 (EEPROM_ADDR_FSM_RESET |
8485 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8486 EEPROM_ADDR_CLKPERD_SHIFT)));
8487
8488 /* XXX schedule_timeout() ... */
8489 for (j = 0; j < 100; j++)
8490 udelay(10);
8491
8492 /* Enable seeprom accesses. */
8493 tw32_f(GRC_LOCAL_CTRL,
8494 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8495 udelay(100);
8496
8497 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8498 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8499 tp->tg3_flags |= TG3_FLAG_NVRAM;
8500
Michael Chane6af3012005-04-21 17:12:05 -07008501 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008502
Michael Chan361b4ac2005-04-21 17:11:21 -07008503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8504 tg3_get_5752_nvram_info(tp);
8505 else
8506 tg3_get_nvram_info(tp);
8507
Linus Torvalds1da177e2005-04-16 15:20:36 -07008508 tg3_get_nvram_size(tp);
8509
Michael Chane6af3012005-04-21 17:12:05 -07008510 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008511
8512 } else {
8513 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8514
8515 tg3_get_eeprom_size(tp);
8516 }
8517}
8518
8519static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8520 u32 offset, u32 *val)
8521{
8522 u32 tmp;
8523 int i;
8524
8525 if (offset > EEPROM_ADDR_ADDR_MASK ||
8526 (offset % 4) != 0)
8527 return -EINVAL;
8528
8529 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8530 EEPROM_ADDR_DEVID_MASK |
8531 EEPROM_ADDR_READ);
8532 tw32(GRC_EEPROM_ADDR,
8533 tmp |
8534 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8535 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8536 EEPROM_ADDR_ADDR_MASK) |
8537 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8538
8539 for (i = 0; i < 10000; i++) {
8540 tmp = tr32(GRC_EEPROM_ADDR);
8541
8542 if (tmp & EEPROM_ADDR_COMPLETE)
8543 break;
8544 udelay(100);
8545 }
8546 if (!(tmp & EEPROM_ADDR_COMPLETE))
8547 return -EBUSY;
8548
8549 *val = tr32(GRC_EEPROM_DATA);
8550 return 0;
8551}
8552
8553#define NVRAM_CMD_TIMEOUT 10000
8554
8555static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8556{
8557 int i;
8558
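	/* Kick off the command and poll for the DONE bit, giving the
	 * hardware up to ~100ms (10000 x 10us) to complete.
	 */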
8559 tw32(NVRAM_CMD, nvram_cmd);
8560 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8561 udelay(10);
8562 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8563 udelay(10);
8564 break;
8565 }
8566 }
8567 if (i == NVRAM_CMD_TIMEOUT) {
8568 return -EBUSY;
8569 }
8570 return 0;
8571}
8572
8573static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8574{
8575 int ret;
8576
8577 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8578 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8579 return -EINVAL;
8580 }
8581
8582 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8583 return tg3_nvram_read_using_eeprom(tp, offset, val);
8584
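	/* Atmel buffered (AT45DB) flash parts are page addressed rather
	 * than linearly addressed: shift the page number up to the page
	 * position bits and keep the byte offset within the page in the
	 * low bits.
	 */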
8585 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8586 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8587 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8588
8589 offset = ((offset / tp->nvram_pagesize) <<
8590 ATMEL_AT45DB0X1B_PAGE_POS) +
8591 (offset % tp->nvram_pagesize);
8592 }
8593
8594 if (offset > NVRAM_ADDR_MSK)
8595 return -EINVAL;
8596
8597 tg3_nvram_lock(tp);
8598
Michael Chane6af3012005-04-21 17:12:05 -07008599 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008600
8601 tw32(NVRAM_ADDR, offset);
8602 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8603 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8604
8605 if (ret == 0)
8606 *val = swab32(tr32(NVRAM_RDDATA));
8607
8608 tg3_nvram_unlock(tp);
8609
Michael Chane6af3012005-04-21 17:12:05 -07008610 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008611
8612 return ret;
8613}
8614
8615static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8616 u32 offset, u32 len, u8 *buf)
8617{
8618 int i, j, rc = 0;
8619 u32 val;
8620
8621 for (i = 0; i < len; i += 4) {
8622 u32 addr, data;
8623
8624 addr = offset + i;
8625
8626 memcpy(&data, buf + i, 4);
8627
8628 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8629
8630 val = tr32(GRC_EEPROM_ADDR);
8631 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8632
8633 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8634 EEPROM_ADDR_READ);
8635 tw32(GRC_EEPROM_ADDR, val |
8636 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8637 (addr & EEPROM_ADDR_ADDR_MASK) |
8638 EEPROM_ADDR_START |
8639 EEPROM_ADDR_WRITE);
8640
8641 for (j = 0; j < 10000; j++) {
8642 val = tr32(GRC_EEPROM_ADDR);
8643
8644 if (val & EEPROM_ADDR_COMPLETE)
8645 break;
8646 udelay(100);
8647 }
8648 if (!(val & EEPROM_ADDR_COMPLETE)) {
8649 rc = -EBUSY;
8650 break;
8651 }
8652 }
8653
8654 return rc;
8655}
8656
8657/* offset and length are dword aligned */
8658static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8659 u8 *buf)
8660{
8661 int ret = 0;
8662 u32 pagesize = tp->nvram_pagesize;
8663 u32 pagemask = pagesize - 1;
8664 u32 nvram_cmd;
8665 u8 *tmp;
8666
8667 tmp = kmalloc(pagesize, GFP_KERNEL);
8668 if (tmp == NULL)
8669 return -ENOMEM;
8670
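	/* Unbuffered flash must be programmed a full page at a time, so
	 * do a read-modify-write cycle: read the page into a scratch
	 * buffer, merge in the new data, erase the page, then write it
	 * back.
	 */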
8671 while (len) {
8672 int j;
Michael Chane6af3012005-04-21 17:12:05 -07008673 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008674
8675 phy_addr = offset & ~pagemask;
8676
8677 for (j = 0; j < pagesize; j += 4) {
8678 if ((ret = tg3_nvram_read(tp, phy_addr + j,
8679 (u32 *) (tmp + j))))
8680 break;
8681 }
8682 if (ret)
8683 break;
8684
8685 page_off = offset & pagemask;
8686 size = pagesize;
8687 if (len < size)
8688 size = len;
8689
8690 len -= size;
8691
8692 memcpy(tmp + page_off, buf, size);
8693
8694 offset = offset + (pagesize - page_off);
8695
Michael Chane6af3012005-04-21 17:12:05 -07008696 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008697
8698 /*
8699 * Before we can erase the flash page, we need
8700 * to issue a special "write enable" command.
8701 */
8702 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8703
8704 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8705 break;
8706
8707 /* Erase the target page */
8708 tw32(NVRAM_ADDR, phy_addr);
8709
8710 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8711 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8712
8713 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8714 break;
8715
8716 /* Issue another write enable to start the write. */
8717 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8718
8719 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8720 break;
8721
8722 for (j = 0; j < pagesize; j += 4) {
8723 u32 data;
8724
8725 data = *((u32 *) (tmp + j));
8726 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8727
8728 tw32(NVRAM_ADDR, phy_addr + j);
8729
8730 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8731 NVRAM_CMD_WR;
8732
8733 if (j == 0)
8734 nvram_cmd |= NVRAM_CMD_FIRST;
8735 else if (j == (pagesize - 4))
8736 nvram_cmd |= NVRAM_CMD_LAST;
8737
8738 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8739 break;
8740 }
8741 if (ret)
8742 break;
8743 }
8744
8745 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8746 tg3_nvram_exec_cmd(tp, nvram_cmd);
8747
8748 kfree(tmp);
8749
8750 return ret;
8751}
8752
8753/* offset and length are dword aligned */
8754static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8755 u8 *buf)
8756{
8757 int i, ret = 0;
8758
8759 for (i = 0; i < len; i += 4, offset += 4) {
8760 u32 data, page_off, phy_addr, nvram_cmd;
8761
8762 memcpy(&data, buf + i, 4);
8763 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8764
8765 page_off = offset % tp->nvram_pagesize;
8766
8767 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8768 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8769
8770 phy_addr = ((offset / tp->nvram_pagesize) <<
8771 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8772 }
8773 else {
8774 phy_addr = offset;
8775 }
8776
8777 tw32(NVRAM_ADDR, phy_addr);
8778
8779 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8780
8781 if ((page_off == 0) || (i == 0))
8782 nvram_cmd |= NVRAM_CMD_FIRST;
8783 else if (page_off == (tp->nvram_pagesize - 4))
8784 nvram_cmd |= NVRAM_CMD_LAST;
8785
8786 if (i == (len - 4))
8787 nvram_cmd |= NVRAM_CMD_LAST;
8788
Michael Chan4c987482005-09-05 17:52:38 -07008789 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8790 (tp->nvram_jedecnum == JEDEC_ST) &&
8791 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008792
8793 if ((ret = tg3_nvram_exec_cmd(tp,
8794 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8795 NVRAM_CMD_DONE)))
8796
8797 break;
8798 }
8799 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8800 /* We always do complete word writes to eeprom. */
8801 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8802 }
8803
8804 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8805 break;
8806 }
8807 return ret;
8808}
8809
8810/* offset and length are dword aligned */
8811static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8812{
8813 int ret;
8814
8815 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8816 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8817 return -EINVAL;
8818 }
8819
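	/* On boards that use GPIO1 as the eeprom write-protect line,
	 * de-assert it for the duration of the write; it is restored
	 * at the end of this function.
	 */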
8820 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07008821 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8822 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008823 udelay(40);
8824 }
8825
8826 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8827 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8828 }
8829 else {
8830 u32 grc_mode;
8831
8832 tg3_nvram_lock(tp);
8833
Michael Chane6af3012005-04-21 17:12:05 -07008834 tg3_enable_nvram_access(tp);
8835 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8836 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008837 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008838
8839 grc_mode = tr32(GRC_MODE);
8840 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8841
8842 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8843 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8844
8845 ret = tg3_nvram_write_block_buffered(tp, offset, len,
8846 buf);
8847 }
8848 else {
8849 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8850 buf);
8851 }
8852
8853 grc_mode = tr32(GRC_MODE);
8854 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8855
Michael Chane6af3012005-04-21 17:12:05 -07008856 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008857 tg3_nvram_unlock(tp);
8858 }
8859
8860 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07008861 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008862 udelay(40);
8863 }
8864
8865 return ret;
8866}
8867
8868struct subsys_tbl_ent {
8869 u16 subsys_vendor, subsys_devid;
8870 u32 phy_id;
8871};
8872
8873static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8874 /* Broadcom boards. */
8875 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8876 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8877 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8878 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
8879 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8880 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8881 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
8882 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8883 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8884 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8885 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8886
8887 /* 3com boards. */
8888 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8889 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8890 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
8891 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8892 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8893
8894 /* DELL boards. */
8895 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8896 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8897 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8898 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8899
8900 /* Compaq boards. */
8901 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8902 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8903 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
8904 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8905 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8906
8907 /* IBM boards. */
8908 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8909};
8910
8911static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8912{
8913 int i;
8914
8915 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8916 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8917 tp->pdev->subsystem_vendor) &&
8918 (subsys_id_to_phy_id[i].subsys_devid ==
8919 tp->pdev->subsystem_device))
8920 return &subsys_id_to_phy_id[i];
8921 }
8922 return NULL;
8923}
8924
Michael Chan7d0c41e2005-04-21 17:06:20 -07008925/* Since this function may be called in D3-hot power state during
8926 * tg3_init_one(), only config cycles are allowed.
8927 */
8928static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008929{
Linus Torvalds1da177e2005-04-16 15:20:36 -07008930 u32 val;
Michael Chan7d0c41e2005-04-21 17:06:20 -07008931
8932 /* Make sure register accesses (indirect or otherwise)
8933 * will function correctly.
8934 */
8935 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8936 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008937
8938 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -07008939 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8940
Linus Torvalds1da177e2005-04-16 15:20:36 -07008941 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8942 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8943 u32 nic_cfg, led_cfg;
Michael Chan7d0c41e2005-04-21 17:06:20 -07008944 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8945 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008946
8947 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8948 tp->nic_sram_data_cfg = nic_cfg;
8949
8950 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8951 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8952 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8953 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8954 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8955 (ver > 0) && (ver < 0x100))
8956 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8957
Linus Torvalds1da177e2005-04-16 15:20:36 -07008958 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8959 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8960 eeprom_phy_serdes = 1;
8961
8962 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8963 if (nic_phy_id != 0) {
8964 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8965 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8966
8967 eeprom_phy_id = (id1 >> 16) << 10;
8968 eeprom_phy_id |= (id2 & 0xfc00) << 16;
8969 eeprom_phy_id |= (id2 & 0x03ff) << 0;
8970 } else
8971 eeprom_phy_id = 0;
8972
Michael Chan7d0c41e2005-04-21 17:06:20 -07008973 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -07008974 if (eeprom_phy_serdes) {
8975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8976 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8977 else
8978 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8979 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07008980
John W. Linvillecbf46852005-04-21 17:01:29 -07008981 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008982 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8983 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -07008984 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008985 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8986
8987 switch (led_cfg) {
8988 default:
8989 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8990 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8991 break;
8992
8993 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8994 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8995 break;
8996
8997 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8998 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -07008999
9000 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9001 * read on some older 5700/5701 bootcode.
9002 */
9003 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9004 ASIC_REV_5700 ||
9005 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9006 ASIC_REV_5701)
9007 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9008
Linus Torvalds1da177e2005-04-16 15:20:36 -07009009 break;
9010
9011 case SHASTA_EXT_LED_SHARED:
9012 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9013 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9014 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9015 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9016 LED_CTRL_MODE_PHY_2);
9017 break;
9018
9019 case SHASTA_EXT_LED_MAC:
9020 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9021 break;
9022
9023 case SHASTA_EXT_LED_COMBO:
9024 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9025 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9026 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9027 LED_CTRL_MODE_PHY_2);
9028 break;
9029
9030 		}
9031
9032 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9034 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9035 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9036
9037 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9038 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9039 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9040 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9041
9042 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9043 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07009044 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009045 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9046 }
9047 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9048 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9049
9050 if (cfg2 & (1 << 17))
9051 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9052
9053 		/* SerDes signal pre-emphasis in register 0x590 is set by
9054 		 * the bootcode if bit 18 is set.
		 */
9055 if (cfg2 & (1 << 18))
9056 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9057 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07009058}
9059
9060static int __devinit tg3_phy_probe(struct tg3 *tp)
9061{
9062 u32 hw_phy_id_1, hw_phy_id_2;
9063 u32 hw_phy_id, hw_phy_id_masked;
9064 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009065
9066 /* Reading the PHY ID register can conflict with ASF
9067 	 * firmware access to the PHY hardware.
9068 */
9069 err = 0;
9070 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9071 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9072 } else {
9073 /* Now read the physical PHY_ID from the chip and verify
9074 		 * that it is sane. If it doesn't look good, we fall back
9075 		 * to the PHY ID recorded in the eeprom area or, failing
9076 		 * that, to the hard-coded subsystem-ID table.
9077 */
9078 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9079 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9080
9081 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9082 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9083 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9084
9085 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9086 }
9087
9088 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9089 tp->phy_id = hw_phy_id;
9090 if (hw_phy_id_masked == PHY_ID_BCM8002)
9091 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -07009092 else
9093 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009094 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -07009095 if (tp->phy_id != PHY_ID_INVALID) {
9096 /* Do nothing, phy ID already set up in
9097 * tg3_get_eeprom_hw_cfg().
9098 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009099 } else {
9100 struct subsys_tbl_ent *p;
9101
9102 /* No eeprom signature? Try the hardcoded
9103 * subsys device table.
9104 */
9105 p = lookup_by_subsys(tp);
9106 if (!p)
9107 return -ENODEV;
9108
9109 tp->phy_id = p->phy_id;
9110 if (!tp->phy_id ||
9111 tp->phy_id == PHY_ID_BCM8002)
9112 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9113 }
9114 }
9115
Michael Chan747e8f82005-07-25 12:33:22 -07009116 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07009117 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9118 u32 bmsr, adv_reg, tg3_ctrl;
9119
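		/* The BMSR link status bit is latched, so read the register
		 * twice: the first read clears any stale latched value and
		 * the second reflects the current link state.
		 */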
9120 tg3_readphy(tp, MII_BMSR, &bmsr);
9121 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9122 (bmsr & BMSR_LSTATUS))
9123 goto skip_phy_reset;
9124
9125 err = tg3_phy_reset(tp);
9126 if (err)
9127 return err;
9128
9129 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9130 ADVERTISE_100HALF | ADVERTISE_100FULL |
9131 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9132 tg3_ctrl = 0;
9133 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9134 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9135 MII_TG3_CTRL_ADV_1000_FULL);
9136 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9137 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9138 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9139 MII_TG3_CTRL_ENABLE_AS_MASTER);
9140 }
9141
9142 if (!tg3_copper_is_advertising_all(tp)) {
9143 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9144
9145 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9146 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9147
9148 tg3_writephy(tp, MII_BMCR,
9149 BMCR_ANENABLE | BMCR_ANRESTART);
9150 }
9151 tg3_phy_set_wirespeed(tp);
9152
9153 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9154 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9155 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9156 }
9157
9158skip_phy_reset:
9159 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9160 err = tg3_init_5401phy_dsp(tp);
9161 if (err)
9162 return err;
9163 }
9164
9165 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9166 err = tg3_init_5401phy_dsp(tp);
9167 }
9168
Michael Chan747e8f82005-07-25 12:33:22 -07009169 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009170 tp->link_config.advertising =
9171 (ADVERTISED_1000baseT_Half |
9172 ADVERTISED_1000baseT_Full |
9173 ADVERTISED_Autoneg |
9174 ADVERTISED_FIBRE);
9175 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9176 tp->link_config.advertising &=
9177 ~(ADVERTISED_1000baseT_Half |
9178 ADVERTISED_1000baseT_Full);
9179
9180 return err;
9181}
9182
9183static void __devinit tg3_read_partno(struct tg3 *tp)
9184{
9185 unsigned char vpd_data[256];
9186 int i;
9187
9188 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9189 /* Sun decided not to put the necessary bits in the
9190 * NVRAM of their onboard tg3 parts :(
9191 */
9192 strcpy(tp->board_part_number, "Sun 570X");
9193 return;
9194 }
9195
9196 for (i = 0; i < 256; i += 4) {
9197 u32 tmp;
9198
9199 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9200 goto out_not_found;
9201
9202 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9203 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9204 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9205 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9206 }
9207
9208 /* Now parse and find the part number. */
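	/* The data follows the PCI VPD format: skip the identifier string
	 * (0x82) and read/write (0x91) resources, then search the
	 * read-only resource (0x90) for the 'PN' keyword.
	 */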
9209 for (i = 0; i < 256; ) {
9210 unsigned char val = vpd_data[i];
9211 int block_end;
9212
9213 if (val == 0x82 || val == 0x91) {
9214 i = (i + 3 +
9215 (vpd_data[i + 1] +
9216 (vpd_data[i + 2] << 8)));
9217 continue;
9218 }
9219
9220 if (val != 0x90)
9221 goto out_not_found;
9222
9223 block_end = (i + 3 +
9224 (vpd_data[i + 1] +
9225 (vpd_data[i + 2] << 8)));
9226 i += 3;
9227 		while (i < block_end) {
9228 			if (vpd_data[i + 0] == 'P' &&
9229 			    vpd_data[i + 1] == 'N') {
9230 				int partno_len = vpd_data[i + 2];
9231 
9232 				if (partno_len > 24)
9233 					goto out_not_found;
9234 
9235 				memcpy(tp->board_part_number,
9236 				       &vpd_data[i + 3],
9237 				       partno_len);
9238 
9239 				/* Success. */
9240 				return;
9241 			}

			/* Advance past this keyword (2-byte name, 1-byte
			 * length, then the data) so the scan cannot spin
			 * forever when 'PN' is not the first entry.
			 */
			i += 3 + vpd_data[i + 2];
9242 		}
9243
9244 /* Part number not found. */
9245 goto out_not_found;
9246 }
9247
9248out_not_found:
9249 strcpy(tp->board_part_number, "none");
9250}
9251
9252#ifdef CONFIG_SPARC64
9253static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9254{
9255 struct pci_dev *pdev = tp->pdev;
9256 struct pcidev_cookie *pcp = pdev->sysdata;
9257
9258 if (pcp != NULL) {
9259 int node = pcp->prom_node;
9260 u32 venid;
9261 int err;
9262
9263 err = prom_getproperty(node, "subsystem-vendor-id",
9264 (char *) &venid, sizeof(venid));
9265 if (err == 0 || err == -1)
9266 return 0;
9267 if (venid == PCI_VENDOR_ID_SUN)
9268 return 1;
9269 }
9270 return 0;
9271}
9272#endif
9273
9274static int __devinit tg3_get_invariants(struct tg3 *tp)
9275{
9276 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009277 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9278 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9279 { },
9280 };
9281 u32 misc_ctrl_reg;
9282 u32 cacheline_sz_reg;
9283 u32 pci_state_reg, grc_misc_cfg;
9284 u32 val;
9285 u16 pci_cmd;
9286 int err;
9287
9288#ifdef CONFIG_SPARC64
9289 if (tg3_is_sun_570X(tp))
9290 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9291#endif
9292
Michael Chan68929142005-08-09 20:17:14 -07009293 /* If we have an AMD 762 chipset, write
Linus Torvalds1da177e2005-04-16 15:20:36 -07009294 * reordering to the mailbox registers done by the host
9295 * controller can cause major troubles. We read back from
9296 * every mailbox register write to force the writes to be
9297 * posted to the chip in order.
9298 */
9299 if (pci_dev_present(write_reorder_chipsets))
9300 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9301
9302 /* Force memory write invalidate off. If we leave it on,
9303 * then on 5700_BX chips we have to enable a workaround.
9304 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9305 * to match the cacheline size. The Broadcom driver have this
9306 	 * to match the cacheline size. The Broadcom driver has this
9307 	 * workaround but turns MWI off all the time and so never uses
9308 */
9309 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9310 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9311 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9312
9313 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9314 * has the register indirect write enable bit set before
9315 * we try to access any of the MMIO registers. It is also
9316 * critical that the PCI-X hw workaround situation is decided
9317 * before that as well.
9318 */
9319 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9320 &misc_ctrl_reg);
9321
9322 tp->pci_chip_rev_id = (misc_ctrl_reg >>
9323 MISC_HOST_CTRL_CHIPREV_SHIFT);
9324
Michael Chanff645be2005-04-21 17:09:53 -07009325 /* Wrong chip ID in 5752 A0. This code can be removed later
9326 * as A0 is not in production.
9327 */
9328 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9329 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9330
Michael Chan68929142005-08-09 20:17:14 -07009331 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9332 * we need to disable memory and use config. cycles
9333 * only to access all registers. The 5702/03 chips
9334 * can mistakenly decode the special cycles from the
9335 * ICH chipsets as memory write cycles, causing corruption
9336 * of register and memory space. Only certain ICH bridges
9337 * will drive special cycles with non-zero data during the
9338 * address phase which can fall within the 5703's address
9339 * range. This is not an ICH bug as the PCI spec allows
9340 * non-zero address during special cycles. However, only
9341 * these ICH bridges are known to drive non-zero addresses
9342 * during special cycles.
9343 *
9344 * Since special cycles do not cross PCI bridges, we only
9345 * enable this workaround if the 5703 is on the secondary
9346 * bus of these ICH bridges.
9347 */
9348 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9349 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9350 static struct tg3_dev_id {
9351 u32 vendor;
9352 u32 device;
9353 u32 rev;
9354 } ich_chipsets[] = {
9355 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9356 PCI_ANY_ID },
9357 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9358 PCI_ANY_ID },
9359 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9360 0xa },
9361 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9362 PCI_ANY_ID },
9363 { },
9364 };
9365 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9366 struct pci_dev *bridge = NULL;
9367
9368 while (pci_id->vendor != 0) {
9369 bridge = pci_get_device(pci_id->vendor, pci_id->device,
9370 bridge);
9371 if (!bridge) {
9372 pci_id++;
9373 continue;
9374 }
9375 if (pci_id->rev != PCI_ANY_ID) {
9376 u8 rev;
9377
9378 pci_read_config_byte(bridge, PCI_REVISION_ID,
9379 &rev);
9380 if (rev > pci_id->rev)
9381 continue;
9382 }
9383 if (bridge->subordinate &&
9384 (bridge->subordinate->number ==
9385 tp->pdev->bus->number)) {
9386
9387 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9388 pci_dev_put(bridge);
9389 break;
9390 }
9391 }
9392 }
9393
Michael Chan4cf78e42005-07-25 12:29:19 -07009394 /* Find msi capability. */
9395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9396 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9397
Linus Torvalds1da177e2005-04-16 15:20:36 -07009398 /* Initialize misc host control in PCI block. */
9399 tp->misc_host_ctrl |= (misc_ctrl_reg &
9400 MISC_HOST_CTRL_CHIPREV);
9401 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9402 tp->misc_host_ctrl);
9403
9404 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9405 &cacheline_sz_reg);
9406
9407 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
9408 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
9409 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
9410 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
9411
John W. Linville2052da92005-04-21 16:56:08 -07009412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -07009413 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9414 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
John W. Linville6708e5c2005-04-21 17:00:52 -07009415 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9416
John W. Linville1b440c562005-04-21 17:03:18 -07009417 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9418 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9419 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9420
John W. Linvillebb7064d2005-04-21 17:02:41 -07009421 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009422 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9423
Michael Chan0f893dc2005-07-25 12:30:38 -07009424 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9425 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9426 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9427 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9428
Linus Torvalds1da177e2005-04-16 15:20:36 -07009429 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9430 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9431
9432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9433 tp->pci_lat_timer < 64) {
9434 tp->pci_lat_timer = 64;
9435
9436 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
9437 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
9438 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
9439 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
9440
9441 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9442 cacheline_sz_reg);
9443 }
9444
9445 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9446 &pci_state_reg);
9447
9448 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9449 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9450
9451 /* If this is a 5700 BX chipset, and we are in PCI-X
9452 * mode, enable register write workaround.
9453 *
9454 * The workaround is to use indirect register accesses
9455 * for all chip writes not to mailbox registers.
9456 */
9457 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9458 u32 pm_reg;
9459 u16 pci_cmd;
9460
9461 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9462
9463 			 * The chip can have its power management PCI config
9464 * space registers clobbered due to this bug.
9465 * So explicitly force the chip into D0 here.
9466 */
9467 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9468 &pm_reg);
9469 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9470 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9471 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9472 pm_reg);
9473
9474 /* Also, force SERR#/PERR# in PCI command. */
9475 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9476 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9477 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9478 }
9479 }
9480
Michael Chan087fe252005-08-09 20:17:41 -07009481 /* 5700 BX chips need to have their TX producer index mailboxes
9482 * written twice to workaround a bug.
9483 	 * written twice to work around a bug.
9484 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9485 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9486
Linus Torvalds1da177e2005-04-16 15:20:36 -07009487 /* Back to back register writes can cause problems on this chip,
9488 * the workaround is to read back all reg writes except those to
9489 * mailbox regs. See tg3_write_indirect_reg32().
9490 *
9491 * PCI Express 5750_A0 rev chips need this workaround too.
9492 */
9493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9494 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9495 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9496 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9497
9498 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9499 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9500 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9501 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9502
9503 /* Chip-specific fixup from Broadcom driver */
9504 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9505 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9506 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9507 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9508 }
9509
Michael Chan1ee582d2005-08-09 20:16:46 -07009510 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -07009511 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -07009512 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -07009513 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -07009514 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -07009515 tp->write32_tx_mbox = tg3_write32;
9516 tp->write32_rx_mbox = tg3_write32;
9517
9518 /* Various workaround register access methods */
9519 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9520 tp->write32 = tg3_write_indirect_reg32;
9521 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9522 tp->write32 = tg3_write_flush_reg32;
9523
9524 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9525 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9526 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9527 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9528 tp->write32_rx_mbox = tg3_write_flush_reg32;
9529 }
Michael Chan20094932005-08-09 20:16:32 -07009530
Michael Chan68929142005-08-09 20:17:14 -07009531 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9532 tp->read32 = tg3_read_indirect_reg32;
9533 tp->write32 = tg3_write_indirect_reg32;
9534 tp->read32_mbox = tg3_read_indirect_mbox;
9535 tp->write32_mbox = tg3_write_indirect_mbox;
9536 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9537 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9538
9539 iounmap(tp->regs);
9540 		tp->regs = NULL;
9541
9542 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9543 pci_cmd &= ~PCI_COMMAND_MEMORY;
9544 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9545 }
9546
Michael Chan7d0c41e2005-04-21 17:06:20 -07009547 /* Get eeprom hw config before calling tg3_set_power_state().
9548 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9549 * determined before calling tg3_set_power_state() so that
9550 * we know whether or not to switch out of Vaux power.
9551 * When the flag is set, it means that GPIO1 is used for eeprom
9552 * write protect and also implies that it is a LOM where GPIOs
9553 * are not used to switch power.
9554 */
9555 tg3_get_eeprom_hw_cfg(tp);
9556
Michael Chan314fba32005-04-21 17:07:04 -07009557 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9558 * GPIO1 driven high will bring 5700's external PHY out of reset.
9559 * It is also used as eeprom write protect on LOMs.
9560 */
9561 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9562 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9563 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9564 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9565 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -07009566 /* Unused GPIO3 must be driven as output on 5752 because there
9567 * are no pull-up resistors on unused GPIO pins.
9568 */
9569 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9570 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -07009571
Linus Torvalds1da177e2005-04-16 15:20:36 -07009572 /* Force the chip into D0. */
9573 err = tg3_set_power_state(tp, 0);
9574 if (err) {
9575 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9576 pci_name(tp->pdev));
9577 return err;
9578 }
9579
9580 /* 5700 B0 chips do not support checksumming correctly due
9581 * to hardware bugs.
9582 */
9583 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9584 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9585
9586 /* Pseudo-header checksum is done by hardware logic and not
9587 	 * the offload processors, so make the chip do the pseudo-
9588 * header checksums on receive. For transmit it is more
9589 * convenient to do the pseudo-header checksum in software
9590 * as Linux does that on transmit for us in all cases.
9591 */
9592 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9593 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9594
9595 /* Derive initial jumbo mode from MTU assigned in
9596 * ether_setup() via the alloc_etherdev() call
9597 */
Michael Chan0f893dc2005-07-25 12:30:38 -07009598 if (tp->dev->mtu > ETH_DATA_LEN &&
9599 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9600 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009601
9602 /* Determine WakeOnLan speed to use. */
9603 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9604 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9605 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9606 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9607 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9608 } else {
9609 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9610 }
9611
9612 /* A few boards don't want Ethernet@WireSpeed phy feature */
9613 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9614 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9615 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -07009616 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9617 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009618 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9619
9620 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9621 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9622 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9623 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9624 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9625
John W. Linvillebb7064d2005-04-21 17:02:41 -07009626 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009627 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9628
Linus Torvalds1da177e2005-04-16 15:20:36 -07009629 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009630 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9631 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9632 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9633
9634 /* Initialize MAC MI mode, polling disabled. */
9635 tw32_f(MAC_MI_MODE, tp->mi_mode);
9636 udelay(80);
9637
9638 /* Initialize data/descriptor byte/word swapping. */
9639 val = tr32(GRC_MODE);
9640 val &= GRC_MODE_HOST_STACKUP;
9641 tw32(GRC_MODE, val | tp->grc_mode);
9642
9643 tg3_switch_clocks(tp);
9644
9645 /* Clear this out for sanity. */
9646 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9647
9648 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9649 &pci_state_reg);
9650 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9651 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9652 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9653
9654 if (chiprevid == CHIPREV_ID_5701_A0 ||
9655 chiprevid == CHIPREV_ID_5701_B0 ||
9656 chiprevid == CHIPREV_ID_5701_B2 ||
9657 chiprevid == CHIPREV_ID_5701_B5) {
9658 void __iomem *sram_base;
9659
9660 /* Write some dummy words into the SRAM status block
9661 			 * area and see if it reads back correctly. If the return
9662 * value is bad, force enable the PCIX workaround.
9663 */
9664 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9665
9666 writel(0x00000000, sram_base);
9667 writel(0x00000000, sram_base + 4);
9668 writel(0xffffffff, sram_base + 4);
9669 if (readl(sram_base) != 0x00000000)
9670 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9671 }
9672 }
9673
9674 udelay(50);
9675 tg3_nvram_init(tp);
9676
9677 grc_misc_cfg = tr32(GRC_MISC_CFG);
9678 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9679
9680 /* Broadcom's driver says that CIOBE multisplit has a bug */
9681#if 0
9682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9683 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9684 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9685 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9686 }
9687#endif
9688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9689 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9690 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9691 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9692
David S. Millerfac9b832005-05-18 22:46:34 -07009693 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9694 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9695 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9696 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9697 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9698 HOSTCC_MODE_CLRTICK_TXBD);
9699
9700 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9701 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9702 tp->misc_host_ctrl);
9703 }
9704
Linus Torvalds1da177e2005-04-16 15:20:36 -07009705 /* these are limited to 10/100 only */
9706 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9707 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9708 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9709 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9710 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9711 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9712 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9713 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9714 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9715 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9716 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9717
9718 err = tg3_phy_probe(tp);
9719 if (err) {
9720 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9721 pci_name(tp->pdev), err);
9722 /* ... but do not return immediately ... */
9723 }
9724
9725 tg3_read_partno(tp);
9726
9727 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9728 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9729 } else {
9730 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9731 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9732 else
9733 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9734 }
9735
9736 /* 5700 {AX,BX} chips have a broken status block link
9737 * change bit implementation, so we must use the
9738 * status register in those cases.
9739 */
9740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9741 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9742 else
9743 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9744
9745	/* The led_ctrl is set during tg3_phy_probe; here we might
9746 * have to force the link status polling mechanism based
9747 * upon subsystem IDs.
9748 */
9749 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9750 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9751 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9752 TG3_FLAG_USE_LINKCHG_REG);
9753 }
9754
9755 /* For all SERDES we poll the MAC status register. */
9756 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9757 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9758 else
9759 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9760
Linus Torvalds1da177e2005-04-16 15:20:36 -07009761 /* It seems all chips can get confused if TX buffers
9762 * straddle the 4GB address boundary in some cases.
9763 */
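	/* Illustrative sketch only, not a statement of this driver's exact
	 * policy: a DMA mapping of length "len" that starts at bus address
	 * "addr" (both names hypothetical) straddles the 4GB boundary
	 * exactly when the upper 32 bits of its first and last byte
	 * addresses differ, i.e.
	 *
	 *	straddle = ((addr >> 32) != ((addr + len - 1) >> 32));
	 *
	 * The transmit path is expected to detect and work around such
	 * mappings when building descriptors.
	 */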
9764 tp->dev->hard_start_xmit = tg3_start_xmit;
9765
9766 tp->rx_offset = 2;
9767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9768 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9769 tp->rx_offset = 0;
9770
9771 /* By default, disable wake-on-lan. User can change this
9772 * using ETHTOOL_SWOL.
9773 */
9774 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9775
9776 return err;
9777}
9778
9779#ifdef CONFIG_SPARC64
9780static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9781{
9782 struct net_device *dev = tp->dev;
9783 struct pci_dev *pdev = tp->pdev;
9784 struct pcidev_cookie *pcp = pdev->sysdata;
9785
9786 if (pcp != NULL) {
9787 int node = pcp->prom_node;
9788
9789 if (prom_getproplen(node, "local-mac-address") == 6) {
9790 prom_getproperty(node, "local-mac-address",
9791 dev->dev_addr, 6);
9792 return 0;
9793 }
9794 }
9795 return -ENODEV;
9796}
9797
9798static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9799{
9800 struct net_device *dev = tp->dev;
9801
9802 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9803 return 0;
9804}
9805#endif
9806
9807static int __devinit tg3_get_device_address(struct tg3 *tp)
9808{
9809 struct net_device *dev = tp->dev;
9810 u32 hi, lo, mac_offset;
9811
9812#ifdef CONFIG_SPARC64
9813 if (!tg3_get_macaddr_sparc(tp))
9814 return 0;
9815#endif
9816
9817 mac_offset = 0x7c;
Michael Chan4cf78e42005-07-25 12:29:19 -07009818 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9819 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
9820 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009821 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9822 mac_offset = 0xcc;
9823 if (tg3_nvram_lock(tp))
9824 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9825 else
9826 tg3_nvram_unlock(tp);
9827 }
9828
9829 /* First try to get it from MAC address mailbox. */
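	/* Layout, as implied by the unpacking below: the high word carries
	 * a 0x484b signature in its upper 16 bits and MAC bytes 0-1 in its
	 * lower 16 bits; the low word carries MAC bytes 2-5 from most to
	 * least significant byte.  For a hypothetical address
	 * 00:10:18:aa:bb:cc this would be hi = 0x484b0010, lo = 0x18aabbcc.
	 */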
9830 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9831 if ((hi >> 16) == 0x484b) {
9832 dev->dev_addr[0] = (hi >> 8) & 0xff;
9833 dev->dev_addr[1] = (hi >> 0) & 0xff;
9834
9835 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9836 dev->dev_addr[2] = (lo >> 24) & 0xff;
9837 dev->dev_addr[3] = (lo >> 16) & 0xff;
9838 dev->dev_addr[4] = (lo >> 8) & 0xff;
9839 dev->dev_addr[5] = (lo >> 0) & 0xff;
9840 }
9841 /* Next, try NVRAM. */
9842 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
9843 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9844 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9845 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9846 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9847 dev->dev_addr[2] = ((lo >> 0) & 0xff);
9848 dev->dev_addr[3] = ((lo >> 8) & 0xff);
9849 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9850 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9851 }
9852 /* Finally just fetch it out of the MAC control regs. */
9853 else {
9854 hi = tr32(MAC_ADDR_0_HIGH);
9855 lo = tr32(MAC_ADDR_0_LOW);
9856
9857 dev->dev_addr[5] = lo & 0xff;
9858 dev->dev_addr[4] = (lo >> 8) & 0xff;
9859 dev->dev_addr[3] = (lo >> 16) & 0xff;
9860 dev->dev_addr[2] = (lo >> 24) & 0xff;
9861 dev->dev_addr[1] = hi & 0xff;
9862 dev->dev_addr[0] = (hi >> 8) & 0xff;
9863 }
9864
9865 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9866#ifdef CONFIG_SPARC64
9867 if (!tg3_get_default_macaddr_sparc(tp))
9868 return 0;
9869#endif
9870 return -EINVAL;
9871 }
9872 return 0;
9873}
9874
David S. Miller59e6b432005-05-18 22:50:10 -07009875#define BOUNDARY_SINGLE_CACHELINE 1
9876#define BOUNDARY_MULTI_CACHELINE 2
9877
9878static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9879{
9880 int cacheline_size;
9881 u8 byte;
9882 int goal;
9883
9884 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9885 if (byte == 0)
9886 cacheline_size = 1024;
9887 else
9888 cacheline_size = (int) byte * 4;
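	/* Note: PCI_CACHE_LINE_SIZE is expressed in units of 32-bit words,
	 * so the multiply by 4 above converts it to bytes.  A value of
	 * zero typically means firmware never programmed the register; it
	 * is treated as the largest boundary handled below, 1024 bytes.
	 */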
9889
9890 /* On 5703 and later chips, the boundary bits have no
9891 * effect.
9892 */
9893 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9894 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9895 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9896 goto out;
9897
9898#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9899 goal = BOUNDARY_MULTI_CACHELINE;
9900#else
9901#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9902 goal = BOUNDARY_SINGLE_CACHELINE;
9903#else
9904 goal = 0;
9905#endif
9906#endif
9907
9908 if (!goal)
9909 goto out;
9910
9911 /* PCI controllers on most RISC systems tend to disconnect
9912 * when a device tries to burst across a cache-line boundary.
9913 * Therefore, letting tg3 do so just wastes PCI bandwidth.
9914 *
9915 * Unfortunately, for PCI-E there are only limited
9916 * write-side controls for this, and thus for reads
9917	 * we will still get the disconnects.  We'll also waste
9918	 * these PCI cycles for both reads and writes on chips
9919	 * other than the 5700 and 5701, since those chips do not
9920	 * implement the boundary bits.
9921 */
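	/* Worked example of the mapping below: on a conventional PCI bus
	 * with a 64-byte cache line and goal == BOUNDARY_SINGLE_CACHELINE,
	 * the last switch statement selects DMA_RWCTRL_READ_BNDRY_64 |
	 * DMA_RWCTRL_WRITE_BNDRY_64, so DMA bursts are broken at 64-byte
	 * boundaries and never cross a cache line.
	 */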
9922 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9923 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9924 switch (cacheline_size) {
9925 case 16:
9926 case 32:
9927 case 64:
9928 case 128:
9929 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9930 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9931 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9932 } else {
9933 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9934 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9935 }
9936 break;
9937
9938 case 256:
9939 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9940 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9941 break;
9942
9943 default:
9944 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9945 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9946 break;
9947 };
9948 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9949 switch (cacheline_size) {
9950 case 16:
9951 case 32:
9952 case 64:
9953 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9954 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9955 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9956 break;
9957 }
9958 /* fallthrough */
9959 case 128:
9960 default:
9961 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9962 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9963 break;
9964 };
9965 } else {
9966 switch (cacheline_size) {
9967 case 16:
9968 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9969 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9970 DMA_RWCTRL_WRITE_BNDRY_16);
9971 break;
9972 }
9973 /* fallthrough */
9974 case 32:
9975 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9976 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9977 DMA_RWCTRL_WRITE_BNDRY_32);
9978 break;
9979 }
9980 /* fallthrough */
9981 case 64:
9982 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9983 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9984 DMA_RWCTRL_WRITE_BNDRY_64);
9985 break;
9986 }
9987 /* fallthrough */
9988 case 128:
9989 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9990 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9991 DMA_RWCTRL_WRITE_BNDRY_128);
9992 break;
9993 }
9994 /* fallthrough */
9995 case 256:
9996 val |= (DMA_RWCTRL_READ_BNDRY_256 |
9997 DMA_RWCTRL_WRITE_BNDRY_256);
9998 break;
9999 case 512:
10000 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10001 DMA_RWCTRL_WRITE_BNDRY_512);
10002 break;
10003 case 1024:
10004 default:
10005 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10006 DMA_RWCTRL_WRITE_BNDRY_1024);
10007 break;
10008 };
10009 }
10010
10011out:
10012 return val;
10013}
10014
Linus Torvalds1da177e2005-04-16 15:20:36 -070010015static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10016{
10017 struct tg3_internal_buffer_desc test_desc;
10018 u32 sram_dma_descs;
10019 int i, ret;
10020
10021 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10022
10023 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10024 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10025 tw32(RDMAC_STATUS, 0);
10026 tw32(WDMAC_STATUS, 0);
10027
10028 tw32(BUFMGR_MODE, 0);
10029 tw32(FTQ_RESET, 0);
10030
10031 test_desc.addr_hi = ((u64) buf_dma) >> 32;
10032 test_desc.addr_lo = buf_dma & 0xffffffff;
10033 test_desc.nic_mbuf = 0x00002100;
10034 test_desc.len = size;
10035
10036 /*
10037	 * HP ZX1 systems were seeing test failures with 5701 cards running
10038	 * at 33MHz the *second* time the tg3 driver was loaded after an
10039	 * initial scan.
10040 *
10041 * Broadcom tells me:
10042 * ...the DMA engine is connected to the GRC block and a DMA
10043 * reset may affect the GRC block in some unpredictable way...
10044 * The behavior of resets to individual blocks has not been tested.
10045 *
10046 * Broadcom noted the GRC reset will also reset all sub-components.
10047 */
10048 if (to_device) {
10049 test_desc.cqid_sqid = (13 << 8) | 2;
10050
10051 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10052 udelay(40);
10053 } else {
10054 test_desc.cqid_sqid = (16 << 8) | 7;
10055
10056 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10057 udelay(40);
10058 }
10059 test_desc.flags = 0x00000005;
10060
10061 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
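	/* Copy the test descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI config-space memory window: BASE_ADDR selects the
	 * SRAM offset, DATA carries the value, and the window is pointed
	 * back at offset 0 once the copy is done.
	 */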
10062 u32 val;
10063
10064 val = *(((u32 *)&test_desc) + i);
10065 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10066 sram_dma_descs + (i * sizeof(u32)));
10067 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10068 }
10069 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10070
10071 if (to_device) {
10072 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10073 } else {
10074 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10075 }
10076
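	/* Poll the DMA completion FIFO for the descriptor we queued; 40
	 * iterations of udelay(100) allow roughly 4ms before the test is
	 * declared a failure (-ENODEV).
	 */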
10077 ret = -ENODEV;
10078 for (i = 0; i < 40; i++) {
10079 u32 val;
10080
10081 if (to_device)
10082 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10083 else
10084 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10085 if ((val & 0xffff) == sram_dma_descs) {
10086 ret = 0;
10087 break;
10088 }
10089
10090 udelay(100);
10091 }
10092
10093 return ret;
10094}
10095
David S. Millerded73402005-05-23 13:59:47 -070010096#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070010097
10098static int __devinit tg3_test_dma(struct tg3 *tp)
10099{
10100 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070010101 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010102 int ret;
10103
10104 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10105 if (!buf) {
10106 ret = -ENOMEM;
10107 goto out_nofree;
10108 }
10109
10110 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10111 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10112
David S. Miller59e6b432005-05-18 22:50:10 -070010113 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010114
10115 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10116 /* DMA read watermark not used on PCIE */
10117 tp->dma_rwctrl |= 0x00180000;
10118 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070010119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10120 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010121 tp->dma_rwctrl |= 0x003f0000;
10122 else
10123 tp->dma_rwctrl |= 0x003f000f;
10124 } else {
10125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10127 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10128
10129 if (ccval == 0x6 || ccval == 0x7)
10130 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10131
David S. Miller59e6b432005-05-18 22:50:10 -070010132 /* Set bit 23 to enable PCIX hw bug fix */
Linus Torvalds1da177e2005-04-16 15:20:36 -070010133 tp->dma_rwctrl |= 0x009f0000;
Michael Chan4cf78e42005-07-25 12:29:19 -070010134 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10135 /* 5780 always in PCIX mode */
10136 tp->dma_rwctrl |= 0x00144000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010137 } else {
10138 tp->dma_rwctrl |= 0x001b000f;
10139 }
10140 }
10141
10142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10144 tp->dma_rwctrl &= 0xfffffff0;
10145
10146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10148 /* Remove this if it causes problems for some boards. */
10149 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10150
10151 /* On 5700/5701 chips, we need to set this bit.
10152 * Otherwise the chip will issue cacheline transactions
10153		 * to streamable DMA memory without all of the byte
10154		 * enables asserted.  This is an error on several
10155 * RISC PCI controllers, in particular sparc64.
10156 *
10157 * On 5703/5704 chips, this bit has been reassigned
10158 * a different meaning. In particular, it is used
10159 * on those chips to enable a PCI-X workaround.
10160 */
10161 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10162 }
10163
10164 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10165
10166#if 0
10167 /* Unneeded, already done by tg3_get_invariants. */
10168 tg3_switch_clocks(tp);
10169#endif
10170
10171 ret = 0;
10172 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10173 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10174 goto out;
10175
David S. Miller59e6b432005-05-18 22:50:10 -070010176 /* It is best to perform DMA test with maximum write burst size
10177 * to expose the 5700/5701 write DMA bug.
10178 */
10179 saved_dma_rwctrl = tp->dma_rwctrl;
10180 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10181 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10182
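	/* Round-trip DMA exercise: fill the host buffer with a known
	 * pattern, DMA it into NIC SRAM, DMA it back out, and compare.
	 * If the data comes back corrupted, the loop retries with the
	 * write boundary clamped to 16 bytes before giving up.
	 */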
Linus Torvalds1da177e2005-04-16 15:20:36 -070010183 while (1) {
10184 u32 *p = buf, i;
10185
10186 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10187 p[i] = i;
10188
10189 /* Send the buffer to the chip. */
10190 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10191 if (ret) {
10192			printk(KERN_ERR "tg3_test_dma() Write of the buffer failed, err %d\n", ret);
10193 break;
10194 }
10195
10196#if 0
10197 /* validate data reached card RAM correctly. */
10198 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10199 u32 val;
10200 tg3_read_mem(tp, 0x2100 + (i*4), &val);
10201 if (le32_to_cpu(val) != p[i]) {
10202 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
10203 /* ret = -ENODEV here? */
10204 }
10205 p[i] = 0;
10206 }
10207#endif
10208 /* Now read it back. */
10209 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10210 if (ret) {
10211			printk(KERN_ERR "tg3_test_dma() Read of the buffer failed, err %d\n", ret);
10212
10213 break;
10214 }
10215
10216 /* Verify it. */
10217 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10218 if (p[i] == i)
10219 continue;
10220
David S. Miller59e6b432005-05-18 22:50:10 -070010221 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10222 DMA_RWCTRL_WRITE_BNDRY_16) {
10223 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010224 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10225 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10226 break;
10227 } else {
10228 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10229 ret = -ENODEV;
10230 goto out;
10231 }
10232 }
10233
10234 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10235 /* Success. */
10236 ret = 0;
10237 break;
10238 }
10239 }
David S. Miller59e6b432005-05-18 22:50:10 -070010240 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10241 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070010242 static struct pci_device_id dma_wait_state_chipsets[] = {
10243 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10244 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10245 { },
10246 };
10247
David S. Miller59e6b432005-05-18 22:50:10 -070010248 /* DMA test passed without adjusting DMA boundary,
Michael Chan6d1cfba2005-06-08 14:13:14 -070010249 * now look for chipsets that are known to expose the
10250 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070010251 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070010252 if (pci_dev_present(dma_wait_state_chipsets)) {
10253 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10254 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10255 }
10256 else
10257 /* Safe to use the calculated DMA boundary. */
10258 tp->dma_rwctrl = saved_dma_rwctrl;
10259
David S. Miller59e6b432005-05-18 22:50:10 -070010260 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10261 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010262
10263out:
10264 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10265out_nofree:
10266 return ret;
10267}
10268
10269static void __devinit tg3_init_link_config(struct tg3 *tp)
10270{
10271 tp->link_config.advertising =
10272 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10273 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10274 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10275 ADVERTISED_Autoneg | ADVERTISED_MII);
10276 tp->link_config.speed = SPEED_INVALID;
10277 tp->link_config.duplex = DUPLEX_INVALID;
10278 tp->link_config.autoneg = AUTONEG_ENABLE;
10279 netif_carrier_off(tp->dev);
10280 tp->link_config.active_speed = SPEED_INVALID;
10281 tp->link_config.active_duplex = DUPLEX_INVALID;
10282 tp->link_config.phy_is_low_power = 0;
10283 tp->link_config.orig_speed = SPEED_INVALID;
10284 tp->link_config.orig_duplex = DUPLEX_INVALID;
10285 tp->link_config.orig_autoneg = AUTONEG_INVALID;
10286}
10287
10288static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10289{
Michael Chanfdfec172005-07-25 12:31:48 -070010290 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10291 tp->bufmgr_config.mbuf_read_dma_low_water =
10292 DEFAULT_MB_RDMA_LOW_WATER_5705;
10293 tp->bufmgr_config.mbuf_mac_rx_low_water =
10294 DEFAULT_MB_MACRX_LOW_WATER_5705;
10295 tp->bufmgr_config.mbuf_high_water =
10296 DEFAULT_MB_HIGH_WATER_5705;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010297
Michael Chanfdfec172005-07-25 12:31:48 -070010298 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10299 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10300 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10301 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10302 tp->bufmgr_config.mbuf_high_water_jumbo =
10303 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10304 } else {
10305 tp->bufmgr_config.mbuf_read_dma_low_water =
10306 DEFAULT_MB_RDMA_LOW_WATER;
10307 tp->bufmgr_config.mbuf_mac_rx_low_water =
10308 DEFAULT_MB_MACRX_LOW_WATER;
10309 tp->bufmgr_config.mbuf_high_water =
10310 DEFAULT_MB_HIGH_WATER;
10311
10312 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10313 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10314 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10315 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10316 tp->bufmgr_config.mbuf_high_water_jumbo =
10317 DEFAULT_MB_HIGH_WATER_JUMBO;
10318 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010319
10320 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10321 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10322}
10323
10324static char * __devinit tg3_phy_string(struct tg3 *tp)
10325{
10326 switch (tp->phy_id & PHY_ID_MASK) {
10327 case PHY_ID_BCM5400: return "5400";
10328 case PHY_ID_BCM5401: return "5401";
10329 case PHY_ID_BCM5411: return "5411";
10330 case PHY_ID_BCM5701: return "5701";
10331 case PHY_ID_BCM5703: return "5703";
10332 case PHY_ID_BCM5704: return "5704";
10333 case PHY_ID_BCM5705: return "5705";
10334 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070010335 case PHY_ID_BCM5752: return "5752";
Michael Chan4cf78e42005-07-25 12:29:19 -070010336 case PHY_ID_BCM5780: return "5780";
Linus Torvalds1da177e2005-04-16 15:20:36 -070010337 case PHY_ID_BCM8002: return "8002/serdes";
10338 case 0: return "serdes";
10339 default: return "unknown";
10340 };
10341}
10342
10343static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10344{
10345 struct pci_dev *peer;
10346 unsigned int func, devnr = tp->pdev->devfn & ~7;
10347
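	/* devfn packs the PCI slot number in its upper bits and the
	 * function number in its low three bits, so devfn & ~7 is function
	 * 0 of this slot; scanning all eight functions finds the other
	 * port of the dual-port 5704.
	 */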
10348 for (func = 0; func < 8; func++) {
10349 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10350 if (peer && peer != tp->pdev)
10351 break;
10352 pci_dev_put(peer);
10353 }
10354 if (!peer || peer == tp->pdev)
10355 BUG();
10356
10357 /*
10358 * We don't need to keep the refcount elevated; there's no way
10359 * to remove one half of this device without removing the other
10360 */
10361 pci_dev_put(peer);
10362
10363 return peer;
10364}
10365
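/* Seed the default interrupt coalescing parameters reported through
 * ethtool.  On a running interface these would typically be inspected
 * with "ethtool -c <dev>" and, where the chip supports it, adjusted
 * with "ethtool -C <dev> ...".
 */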
David S. Miller15f98502005-05-18 22:49:26 -070010366static void __devinit tg3_init_coal(struct tg3 *tp)
10367{
10368 struct ethtool_coalesce *ec = &tp->coal;
10369
10370 memset(ec, 0, sizeof(*ec));
10371 ec->cmd = ETHTOOL_GCOALESCE;
10372 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10373 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10374 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10375 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10376 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10377 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10378 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10379 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10380 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10381
10382 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10383 HOSTCC_MODE_CLRTICK_TXBD)) {
10384 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10385 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10386 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10387 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10388 }
Michael Chand244c892005-07-05 14:42:33 -070010389
10390 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10391 ec->rx_coalesce_usecs_irq = 0;
10392 ec->tx_coalesce_usecs_irq = 0;
10393 ec->stats_block_coalesce_usecs = 0;
10394 }
David S. Miller15f98502005-05-18 22:49:26 -070010395}
10396
Linus Torvalds1da177e2005-04-16 15:20:36 -070010397static int __devinit tg3_init_one(struct pci_dev *pdev,
10398 const struct pci_device_id *ent)
10399{
10400 static int tg3_version_printed = 0;
10401 unsigned long tg3reg_base, tg3reg_len;
10402 struct net_device *dev;
10403 struct tg3 *tp;
10404 int i, err, pci_using_dac, pm_cap;
10405
10406 if (tg3_version_printed++ == 0)
10407 printk(KERN_INFO "%s", version);
10408
10409 err = pci_enable_device(pdev);
10410 if (err) {
10411 printk(KERN_ERR PFX "Cannot enable PCI device, "
10412 "aborting.\n");
10413 return err;
10414 }
10415
10416 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10417 printk(KERN_ERR PFX "Cannot find proper PCI device "
10418 "base address, aborting.\n");
10419 err = -ENODEV;
10420 goto err_out_disable_pdev;
10421 }
10422
10423 err = pci_request_regions(pdev, DRV_MODULE_NAME);
10424 if (err) {
10425 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10426 "aborting.\n");
10427 goto err_out_disable_pdev;
10428 }
10429
10430 pci_set_master(pdev);
10431
10432 /* Find power-management capability. */
10433 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10434 if (pm_cap == 0) {
10435 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10436 "aborting.\n");
10437 err = -EIO;
10438 goto err_out_free_res;
10439 }
10440
10441 /* Configure DMA attributes. */
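	/* Preference order, as implemented below: try a full 64-bit DMA
	 * mask first (remembering pci_using_dac so NETIF_F_HIGHDMA can be
	 * advertised later); if that fails, fall back to a 32-bit mask and
	 * abort the probe only if even that is unusable.
	 */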
10442 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10443 if (!err) {
10444 pci_using_dac = 1;
10445 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10446 if (err < 0) {
10447 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10448 "for consistent allocations\n");
10449 goto err_out_free_res;
10450 }
10451 } else {
10452 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10453 if (err) {
10454 printk(KERN_ERR PFX "No usable DMA configuration, "
10455 "aborting.\n");
10456 goto err_out_free_res;
10457 }
10458 pci_using_dac = 0;
10459 }
10460
10461 tg3reg_base = pci_resource_start(pdev, 0);
10462 tg3reg_len = pci_resource_len(pdev, 0);
10463
10464 dev = alloc_etherdev(sizeof(*tp));
10465 if (!dev) {
10466 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10467 err = -ENOMEM;
10468 goto err_out_free_res;
10469 }
10470
10471 SET_MODULE_OWNER(dev);
10472 SET_NETDEV_DEV(dev, &pdev->dev);
10473
10474 if (pci_using_dac)
10475 dev->features |= NETIF_F_HIGHDMA;
10476 dev->features |= NETIF_F_LLTX;
10477#if TG3_VLAN_TAG_USED
10478 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10479 dev->vlan_rx_register = tg3_vlan_rx_register;
10480 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10481#endif
10482
10483 tp = netdev_priv(dev);
10484 tp->pdev = pdev;
10485 tp->dev = dev;
10486 tp->pm_cap = pm_cap;
10487 tp->mac_mode = TG3_DEF_MAC_MODE;
10488 tp->rx_mode = TG3_DEF_RX_MODE;
10489 tp->tx_mode = TG3_DEF_TX_MODE;
10490 tp->mi_mode = MAC_MI_MODE_BASE;
10491 if (tg3_debug > 0)
10492 tp->msg_enable = tg3_debug;
10493 else
10494 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10495
10496 /* The word/byte swap controls here control register access byte
10497 * swapping. DMA data byte swapping is controlled in the GRC_MODE
10498 * setting below.
10499 */
10500 tp->misc_host_ctrl =
10501 MISC_HOST_CTRL_MASK_PCI_INT |
10502 MISC_HOST_CTRL_WORD_SWAP |
10503 MISC_HOST_CTRL_INDIR_ACCESS |
10504 MISC_HOST_CTRL_PCISTATE_RW;
10505
10506 /* The NONFRM (non-frame) byte/word swap controls take effect
10507 * on descriptor entries, anything which isn't packet data.
10508 *
10509 * The StrongARM chips on the board (one for tx, one for rx)
10510 * are running in big-endian mode.
10511 */
10512 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10513 GRC_MODE_WSWAP_NONFRM_DATA);
10514#ifdef __BIG_ENDIAN
10515 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10516#endif
10517 spin_lock_init(&tp->lock);
10518 spin_lock_init(&tp->tx_lock);
10519 spin_lock_init(&tp->indirect_lock);
10520 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10521
10522 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10523 if (tp->regs == 0UL) {
10524 printk(KERN_ERR PFX "Cannot map device registers, "
10525 "aborting.\n");
10526 err = -ENOMEM;
10527 goto err_out_free_dev;
10528 }
10529
10530 tg3_init_link_config(tp);
10531
Linus Torvalds1da177e2005-04-16 15:20:36 -070010532 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10533 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10534 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10535
10536 dev->open = tg3_open;
10537 dev->stop = tg3_close;
10538 dev->get_stats = tg3_get_stats;
10539 dev->set_multicast_list = tg3_set_rx_mode;
10540 dev->set_mac_address = tg3_set_mac_addr;
10541 dev->do_ioctl = tg3_ioctl;
10542 dev->tx_timeout = tg3_tx_timeout;
10543 dev->poll = tg3_poll;
10544 dev->ethtool_ops = &tg3_ethtool_ops;
10545 dev->weight = 64;
10546 dev->watchdog_timeo = TG3_TX_TIMEOUT;
10547 dev->change_mtu = tg3_change_mtu;
10548 dev->irq = pdev->irq;
10549#ifdef CONFIG_NET_POLL_CONTROLLER
10550 dev->poll_controller = tg3_poll_controller;
10551#endif
10552
10553 err = tg3_get_invariants(tp);
10554 if (err) {
10555 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10556 "aborting.\n");
10557 goto err_out_iounmap;
10558 }
10559
Michael Chanfdfec172005-07-25 12:31:48 -070010560 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010561
10562#if TG3_TSO_SUPPORT != 0
10563 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10564 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10565 }
10566 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10568 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10569 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10570 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10571 } else {
10572 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10573 }
10574
10575 /* TSO is off by default, user can enable using ethtool. */
10576#if 0
10577 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10578 dev->features |= NETIF_F_TSO;
10579#endif
10580
10581#endif
10582
10583 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10584 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10585 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10586 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10587 tp->rx_pending = 63;
10588 }
10589
10590 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10591 tp->pdev_peer = tg3_find_5704_peer(tp);
10592
10593 err = tg3_get_device_address(tp);
10594 if (err) {
10595 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10596 "aborting.\n");
10597 goto err_out_iounmap;
10598 }
10599
10600 /*
10601	 * Reset the chip in case the UNDI or EFI driver did not shut it
10602	 * down cleanly; otherwise the DMA self test will enable WDMAC and
10603	 * we'll see (spurious) pending DMA on the PCI bus at that point.
10604 */
10605 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10606 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10607 pci_save_state(tp->pdev);
10608 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
Michael Chan944d9802005-05-29 14:57:48 -070010609 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010610 }
10611
10612 err = tg3_test_dma(tp);
10613 if (err) {
10614 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10615 goto err_out_iounmap;
10616 }
10617
10618 /* Tigon3 can do ipv4 only... and some chips have buggy
10619 * checksumming.
10620 */
10621 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10622 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10623 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10624 } else
10625 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10626
10627 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10628 dev->features &= ~NETIF_F_HIGHDMA;
10629
10630 /* flow control autonegotiation is default behavior */
10631 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10632
David S. Miller15f98502005-05-18 22:49:26 -070010633 tg3_init_coal(tp);
10634
David S. Miller7d3f4c92005-08-06 06:35:48 -070010635 /* Now that we have fully setup the chip, save away a snapshot
10636 * of the PCI config space. We need to restore this after
10637 * GRC_MISC_CFG core clock resets and some resume events.
10638 */
10639 pci_save_state(tp->pdev);
10640
Linus Torvalds1da177e2005-04-16 15:20:36 -070010641 err = register_netdev(dev);
10642 if (err) {
10643 printk(KERN_ERR PFX "Cannot register net device, "
10644 "aborting.\n");
10645 goto err_out_iounmap;
10646 }
10647
10648 pci_set_drvdata(pdev, dev);
10649
Linus Torvalds1da177e2005-04-16 15:20:36 -070010650 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
10651 dev->name,
10652 tp->board_part_number,
10653 tp->pci_chip_rev_id,
10654 tg3_phy_string(tp),
10655 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
10656 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10657 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10658 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10659 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10660 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10661
10662 for (i = 0; i < 6; i++)
10663 printk("%2.2x%c", dev->dev_addr[i],
10664 i == 5 ? '\n' : ':');
10665
10666 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10667 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10668 "TSOcap[%d] \n",
10669 dev->name,
10670 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10671 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10672 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10673 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10674 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10675 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10676 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
David S. Miller59e6b432005-05-18 22:50:10 -070010677 printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10678 dev->name, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010679
10680 return 0;
10681
10682err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070010683 if (tp->regs) {
10684 iounmap(tp->regs);
10685 tp->regs = 0;
10686 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010687
10688err_out_free_dev:
10689 free_netdev(dev);
10690
10691err_out_free_res:
10692 pci_release_regions(pdev);
10693
10694err_out_disable_pdev:
10695 pci_disable_device(pdev);
10696 pci_set_drvdata(pdev, NULL);
10697 return err;
10698}
10699
10700static void __devexit tg3_remove_one(struct pci_dev *pdev)
10701{
10702 struct net_device *dev = pci_get_drvdata(pdev);
10703
10704 if (dev) {
10705 struct tg3 *tp = netdev_priv(dev);
10706
10707 unregister_netdev(dev);
Michael Chan68929142005-08-09 20:17:14 -070010708 if (tp->regs) {
10709 iounmap(tp->regs);
10710 tp->regs = 0;
10711 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010712 free_netdev(dev);
10713 pci_release_regions(pdev);
10714 pci_disable_device(pdev);
10715 pci_set_drvdata(pdev, NULL);
10716 }
10717}
10718
10719static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10720{
10721 struct net_device *dev = pci_get_drvdata(pdev);
10722 struct tg3 *tp = netdev_priv(dev);
10723 int err;
10724
10725 if (!netif_running(dev))
10726 return 0;
10727
10728 tg3_netif_stop(tp);
10729
10730 del_timer_sync(&tp->timer);
10731
David S. Millerf47c11e2005-06-24 20:18:35 -070010732 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010733 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070010734 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010735
10736 netif_device_detach(dev);
10737
David S. Millerf47c11e2005-06-24 20:18:35 -070010738 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070010739 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
David S. Millerf47c11e2005-06-24 20:18:35 -070010740 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010741
10742 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10743 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -070010744 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010745
10746 tg3_init_hw(tp);
10747
10748 tp->timer.expires = jiffies + tp->timer_offset;
10749 add_timer(&tp->timer);
10750
10751 netif_device_attach(dev);
10752 tg3_netif_start(tp);
10753
David S. Millerf47c11e2005-06-24 20:18:35 -070010754 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010755 }
10756
10757 return err;
10758}
10759
10760static int tg3_resume(struct pci_dev *pdev)
10761{
10762 struct net_device *dev = pci_get_drvdata(pdev);
10763 struct tg3 *tp = netdev_priv(dev);
10764 int err;
10765
10766 if (!netif_running(dev))
10767 return 0;
10768
10769 pci_restore_state(tp->pdev);
10770
10771 err = tg3_set_power_state(tp, 0);
10772 if (err)
10773 return err;
10774
10775 netif_device_attach(dev);
10776
David S. Millerf47c11e2005-06-24 20:18:35 -070010777 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010778
10779 tg3_init_hw(tp);
10780
10781 tp->timer.expires = jiffies + tp->timer_offset;
10782 add_timer(&tp->timer);
10783
Linus Torvalds1da177e2005-04-16 15:20:36 -070010784 tg3_netif_start(tp);
10785
David S. Millerf47c11e2005-06-24 20:18:35 -070010786 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010787
10788 return 0;
10789}
10790
10791static struct pci_driver tg3_driver = {
10792 .name = DRV_MODULE_NAME,
10793 .id_table = tg3_pci_tbl,
10794 .probe = tg3_init_one,
10795 .remove = __devexit_p(tg3_remove_one),
10796 .suspend = tg3_suspend,
10797 .resume = tg3_resume
10798};
10799
10800static int __init tg3_init(void)
10801{
10802 return pci_module_init(&tg3_driver);
10803}
10804
10805static void __exit tg3_cleanup(void)
10806{
10807 pci_unregister_driver(&tg3_driver);
10808}
10809
10810module_init(tg3_init);
10811module_exit(tg3_cleanup);